VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@105252

Last change on this file since 105252 was 105250, checked in by vboxsync, 7 months ago

VMM/IEM: invlpg assertion fix + optimization. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 457.0 KB
Line 
1/* $Id: IEMAll.cpp 105250 2024-07-10 01:47:33Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
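/*
 * An illustrative sketch of how the level assignments above look in practice.
 * These statements are written for this overview (loosely modelled on the code
 * in this file) and are not verbatim quotes:
 *
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%RX64\n", uCs, uRip));    // IEM flow: enter/exit info
 *      Log4(("decode: %04x:%08RX64: xor eax,eax\n", uCs, uEip));   // IEM level 4: mnemonics w/ EIP
 *      Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));       // IEM level 10: TLB details
 *
 * Which groups and levels are active is a runtime logger setting (for instance
 * via the VBOX_LOG environment variable using the usual IPRT group-settings
 * syntax); the group names correspond to the LOG_GROUP_IEM and LOG_GROUP_IEM_MEM
 * defines.
 */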
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB entries for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
225 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
226 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
227 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
228 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
229 } while (0)
230#else
231# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
232#endif
233
234 /*
235 * Process guest breakpoints.
236 */
237#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
238 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
239 { \
240 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
241 { \
242 case X86_DR7_RW_EO: \
243 fExec |= IEM_F_PENDING_BRK_INSTR; \
244 break; \
245 case X86_DR7_RW_WO: \
246 case X86_DR7_RW_RW: \
247 fExec |= IEM_F_PENDING_BRK_DATA; \
248 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
249 break; \
250 case X86_DR7_RW_IO: \
251 fExec |= IEM_F_PENDING_BRK_X86_IO; \
252 break; \
253 } \
254 } \
255 } while (0)
256
257 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
258 if (fGstDr7 & X86_DR7_ENABLED_MASK)
259 {
260/** @todo extract more details here to simplify matching later. */
261#ifdef IEM_WITH_DATA_TLB
262 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
263#endif
264 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
265 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
266 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
267 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
268 }
269
270 /*
271 * Process hypervisor breakpoints.
272 */
273 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
274 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
275 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
276 {
277/** @todo extract more details here to simplify matching later. */
278 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
279 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
282 }
283
284 return fExec;
285}
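/*
 * For orientation: the INVALID_TLB_ENTRY_FOR_BP macro above relies on the TLB
 * layout used throughout this file. Each guest page maps to a pair of adjacent
 * slots, the even one holding the non-global translation (tag OR'ed with
 * uTlbRevision) and the odd one the global translation (tag OR'ed with
 * uTlbRevisionGlobal). A simplified lookup sketch, assuming the tag is the
 * linear page number as asserted elsewhere in this file; the index formula
 * below is an illustration, the real one is IEMTLB_TAG_TO_EVEN_INDEX:
 *
 *      uint64_t const  uTagNoRev     = GCPtr >> GUEST_PAGE_SHIFT;                     // page number, no revision bits
 *      uintptr_t const idxEven       = (uTagNoRev * 2) % RT_ELEMENTS(pTlb->aEntries); // even slot of the pair (sketch)
 *      bool const      fHitNonGlobal = pTlb->aEntries[idxEven    ].uTag == (uTagNoRev | pTlb->uTlbRevision);
 *      bool const      fHitGlobal    = pTlb->aEntries[idxEven + 1].uTag == (uTagNoRev | pTlb->uTlbRevisionGlobal);
 */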
286
287
288/**
289 * Initializes the decoder state.
290 *
291 * iemReInitDecoder is mostly a copy of this function.
292 *
293 * @param pVCpu The cross context virtual CPU structure of the
294 * calling thread.
295 * @param fExecOpts Optional execution flags:
296 * - IEM_F_BYPASS_HANDLERS
297 * - IEM_F_X86_DISREGARD_LOCK
298 */
299DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
300{
301 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
302 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
311
312 /* Execution state: */
313 uint32_t fExec;
314 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
315
316 /* Decoder state: */
317 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
318 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
319 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
320 {
321 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
322 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
323 }
324 else
325 {
326 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
327 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
328 }
329 pVCpu->iem.s.fPrefixes = 0;
330 pVCpu->iem.s.uRexReg = 0;
331 pVCpu->iem.s.uRexB = 0;
332 pVCpu->iem.s.uRexIndex = 0;
333 pVCpu->iem.s.idxPrefix = 0;
334 pVCpu->iem.s.uVex3rdReg = 0;
335 pVCpu->iem.s.uVexLength = 0;
336 pVCpu->iem.s.fEvexStuff = 0;
337 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
338#ifdef IEM_WITH_CODE_TLB
339 pVCpu->iem.s.pbInstrBuf = NULL;
340 pVCpu->iem.s.offInstrNextByte = 0;
341 pVCpu->iem.s.offCurInstrStart = 0;
342# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
343 pVCpu->iem.s.offOpcode = 0;
344# endif
345# ifdef VBOX_STRICT
346 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
347 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
348 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
349 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
350# endif
351#else
352 pVCpu->iem.s.offOpcode = 0;
353 pVCpu->iem.s.cbOpcode = 0;
354#endif
355 pVCpu->iem.s.offModRm = 0;
356 pVCpu->iem.s.cActiveMappings = 0;
357 pVCpu->iem.s.iNextMapping = 0;
358 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
359
360#ifdef DBGFTRACE_ENABLED
361 switch (IEM_GET_CPU_MODE(pVCpu))
362 {
363 case IEMMODE_64BIT:
364 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
365 break;
366 case IEMMODE_32BIT:
367 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
368 break;
369 case IEMMODE_16BIT:
370 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
371 break;
372 }
373#endif
374}
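/*
 * For reference, the defaults established above follow the architectural rules
 * (an illustration of x86 behaviour, not anything IEM specific):
 *
 *      CPU mode    default address size    default operand size
 *      16-bit      16-bit                  16-bit
 *      32-bit      32-bit                  32-bit
 *      64-bit      64-bit                  32-bit
 *
 * The 0x66/0x67 prefixes toggle the effective operand/address size during
 * decoding, and in 64-bit mode REX.W selects a 64-bit effective operand size.
 */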
375
376
377/**
378 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
379 *
380 * This is mostly a copy of iemInitDecoder.
381 *
382 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
383 */
384DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
385{
386 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
387 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
395
396 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
397 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
398 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
399
400 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
401 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
402 pVCpu->iem.s.enmEffAddrMode = enmMode;
403 if (enmMode != IEMMODE_64BIT)
404 {
405 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
406 pVCpu->iem.s.enmEffOpSize = enmMode;
407 }
408 else
409 {
410 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
411 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
412 }
413 pVCpu->iem.s.fPrefixes = 0;
414 pVCpu->iem.s.uRexReg = 0;
415 pVCpu->iem.s.uRexB = 0;
416 pVCpu->iem.s.uRexIndex = 0;
417 pVCpu->iem.s.idxPrefix = 0;
418 pVCpu->iem.s.uVex3rdReg = 0;
419 pVCpu->iem.s.uVexLength = 0;
420 pVCpu->iem.s.fEvexStuff = 0;
421 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
422#ifdef IEM_WITH_CODE_TLB
423 if (pVCpu->iem.s.pbInstrBuf)
424 {
425 uint64_t off = (enmMode == IEMMODE_64BIT
426 ? pVCpu->cpum.GstCtx.rip
427 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
428 - pVCpu->iem.s.uInstrBufPc;
429 if (off < pVCpu->iem.s.cbInstrBufTotal)
430 {
431 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
432 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
433 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
434 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
435 else
436 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
437 }
438 else
439 {
440 pVCpu->iem.s.pbInstrBuf = NULL;
441 pVCpu->iem.s.offInstrNextByte = 0;
442 pVCpu->iem.s.offCurInstrStart = 0;
443 pVCpu->iem.s.cbInstrBuf = 0;
444 pVCpu->iem.s.cbInstrBufTotal = 0;
445 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
446 }
447 }
448 else
449 {
450 pVCpu->iem.s.offInstrNextByte = 0;
451 pVCpu->iem.s.offCurInstrStart = 0;
452 pVCpu->iem.s.cbInstrBuf = 0;
453 pVCpu->iem.s.cbInstrBufTotal = 0;
454# ifdef VBOX_STRICT
455 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
456# endif
457 }
458# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
459 pVCpu->iem.s.offOpcode = 0;
460# endif
461#else /* !IEM_WITH_CODE_TLB */
462 pVCpu->iem.s.cbOpcode = 0;
463 pVCpu->iem.s.offOpcode = 0;
464#endif /* !IEM_WITH_CODE_TLB */
465 pVCpu->iem.s.offModRm = 0;
466 Assert(pVCpu->iem.s.cActiveMappings == 0);
467 pVCpu->iem.s.iNextMapping = 0;
468 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
469 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
470
471#ifdef DBGFTRACE_ENABLED
472 switch (enmMode)
473 {
474 case IEMMODE_64BIT:
475 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
476 break;
477 case IEMMODE_32BIT:
478 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
479 break;
480 case IEMMODE_16BIT:
481 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
482 break;
483 }
484#endif
485}
486
487
488
489/**
490 * Prefetches opcodes the first time, when starting execution.
491 *
492 * @returns Strict VBox status code.
493 * @param pVCpu The cross context virtual CPU structure of the
494 * calling thread.
495 * @param fExecOpts Optional execution flags:
496 * - IEM_F_BYPASS_HANDLERS
497 * - IEM_F_X86_DISREGARD_LOCK
498 */
499static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
500{
501 iemInitDecoder(pVCpu, fExecOpts);
502
503#ifndef IEM_WITH_CODE_TLB
504 /*
505 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
506 *
507 * First translate CS:rIP to a physical address.
508 *
509 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
510 * all relevant bytes from the first page, as it ASSUMES it's only ever
511 * called for dealing with CS.LIM, page crossing and instructions that
512 * are too long.
513 */
514 uint32_t cbToTryRead;
515 RTGCPTR GCPtrPC;
516 if (IEM_IS_64BIT_CODE(pVCpu))
517 {
518 cbToTryRead = GUEST_PAGE_SIZE;
519 GCPtrPC = pVCpu->cpum.GstCtx.rip;
520 if (IEM_IS_CANONICAL(GCPtrPC))
521 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
522 else
523 return iemRaiseGeneralProtectionFault0(pVCpu);
524 }
525 else
526 {
527 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
528 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
529 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
530 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
531 else
532 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
533 if (cbToTryRead) { /* likely */ }
534 else /* overflowed */
535 {
536 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
537 cbToTryRead = UINT32_MAX;
538 }
539 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
540 Assert(GCPtrPC <= UINT32_MAX);
541 }
542
543 PGMPTWALKFAST WalkFast;
544 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
545 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
546 &WalkFast);
547 if (RT_SUCCESS(rc))
548 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
549 else
550 {
551 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
552# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
553/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
554 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
555 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
556 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
557# endif
558 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
559 }
560#if 0
561 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
562 else
563 {
564 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
565# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
566/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
567# error completely wrong
568 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
569 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
570# endif
571 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
572 }
573 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
574 else
575 {
576 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
577# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
578/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
579# error completely wrong.
580 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
581 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
582# endif
583 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
584 }
585#else
586 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
587 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
588#endif
589 RTGCPHYS const GCPhys = WalkFast.GCPhys;
590
591 /*
592 * Read the bytes at this address.
593 */
594 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
595 if (cbToTryRead > cbLeftOnPage)
596 cbToTryRead = cbLeftOnPage;
597 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
598 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
599
600 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
601 {
602 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
603 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
604 { /* likely */ }
605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
606 {
607 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
608 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
609 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
610 }
611 else
612 {
613 Log((RT_SUCCESS(rcStrict)
614 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
615 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
616 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
617 return rcStrict;
618 }
619 }
620 else
621 {
622 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
623 if (RT_SUCCESS(rc))
624 { /* likely */ }
625 else
626 {
627 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
628 GCPtrPC, GCPhys, cbToTryRead, rc));
629 return rc;
630 }
631 }
632 pVCpu->iem.s.cbOpcode = cbToTryRead;
633#endif /* !IEM_WITH_CODE_TLB */
634 return VINF_SUCCESS;
635}
636
637
638#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
639/**
640 * Worker for iemTlbInvalidateAll.
641 */
642template<bool a_fGlobal>
643DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
644{
645 if (!a_fGlobal)
646 pTlb->cTlsFlushes++;
647 else
648 pTlb->cTlsGlobalFlushes++;
649
650 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
651 if (RT_LIKELY(pTlb->uTlbRevision != 0))
652 { /* very likely */ }
653 else
654 {
655 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
656 pTlb->cTlbRevisionRollovers++;
657 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
658 while (i-- > 0)
659 pTlb->aEntries[i * 2].uTag = 0;
660 }
661 if (a_fGlobal)
662 {
663 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
664 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
665 { /* very likely */ }
666 else
667 {
668 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
669 pTlb->cTlbRevisionRollovers++;
670 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
671 while (i-- > 0)
672 pTlb->aEntries[i * 2 + 1].uTag = 0;
673 }
674 }
675}
676#endif
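/*
 * A worked illustration of the revision trick used above (values invented):
 * an entry loaded for page 0x12345 under revision R1 carries
 *
 *      uTag == 0x12345 | R1
 *
 * and a lookup compares it against (pageNo | pTlb->uTlbRevision), so it hits
 * for as long as the revision is still R1. Bumping uTlbRevision to R2 makes
 * that comparison fail for every existing entry at once, which is why a flush
 * is just an increment; only when the revision counter would wrap are the
 * stale tags actually zeroed, as done in the rollover branches above.
 */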
677
678
679/**
680 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
681 */
682template<bool a_fGlobal>
683DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
684{
685#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
686 Log10(("IEMTlbInvalidateAll\n"));
687
688# ifdef IEM_WITH_CODE_TLB
689 pVCpu->iem.s.cbInstrBufTotal = 0;
690 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
691# endif
692
693# ifdef IEM_WITH_DATA_TLB
694 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
695# endif
696#else
697 RT_NOREF(pVCpu);
698#endif
699}
700
701
702/**
703 * Invalidates the non-global IEM TLB entries.
704 *
705 * This is called internally as well as by PGM when moving GC mappings.
706 *
707 * @param pVCpu The cross context virtual CPU structure of the calling
708 * thread.
709 */
710VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
711{
712 iemTlbInvalidateAll<false>(pVCpu);
713}
714
715
716/**
717 * Invalidates all the IEM TLB entries.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 */
724VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
725{
726 iemTlbInvalidateAll<true>(pVCpu);
727}
728
729
730#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
731template<bool a_fDataTlb>
732DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
733{
734 /*
735 * Flush the entry pair.
736 *
737 * We ASSUME that the guest hasn't tricked us into loading one of these
738 * from a large page and the other from a regular 4KB page. This is made
739 * much less of a problem, in that the guest would also have to flip the
740 * G bit to accomplish this.
741 */
742 int fMaybeLargePage = -1;
743 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
744 {
745 pTlb->aEntries[idxEven].uTag = 0;
746 fMaybeLargePage = RT_BOOL(pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE);
747 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
748 pVCpu->iem.s.cbInstrBufTotal = 0;
749 }
750 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
751 {
752 pTlb->aEntries[idxEven + 1].uTag = 0;
753 fMaybeLargePage = RT_BOOL(pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE);
754 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756 }
757
758 /*
759 * If we cannot rule out a large page, we have to scan all the 4K TLB
760 * entries such a page covers to ensure we evict all relevant entries.
761 * ASSUMES that tag calculation is a right shift by GUEST_PAGE_SHIFT.
762 */
763 if (fMaybeLargePage)
764 {
765 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
766 RTGCPTR const GCPtrInstrBufPcTag = IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
767 if ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE) && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
768 {
769 /* 2MB large page */
770 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64(21 - GUEST_PAGE_SHIFT) - 1U);
771 RTGCPTR GCPtrTagGlob = GCPtrTag | pTlb->uTlbRevisionGlobal;
772 GCPtrTag |= pTlb->uTlbRevision;
773
774# if IEMTLB_ENTRY_COUNT >= 512
775 idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag);
776 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)0;
777 uintptr_t const idxEvenEnd = idxEven + 512;
778# else
779 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
780 & ~(RTGCPTR)( (RT_BIT_64(9 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U)
781 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
782 uintptr_t const idxEvenEnd = IEMTLB_ENTRY_COUNT;
783# endif
784 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
785 {
786 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
787 {
788 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
789 {
790 pTlb->aEntries[idxEven].uTag = 0;
791 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
792 pVCpu->iem.s.cbInstrBufTotal = 0;
793 fMaybeLargePage = true;
794 }
795 else
796 {
797 Assert(fMaybeLargePage == -1);
798 break;
799 }
800 }
801 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
802 {
803 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
804 {
805 pTlb->aEntries[idxEven + 1].uTag = 0;
806 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
807 pVCpu->iem.s.cbInstrBufTotal = 0;
808 fMaybeLargePage = true;
809 }
810 else
811 {
812 Assert(fMaybeLargePage == -1);
813 break;
814 }
815 }
816 GCPtrTag++;
817 GCPtrTagGlob++;
818 }
819 }
820 else
821 {
822 /* 4MB large page */
823 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64(22 - GUEST_PAGE_SHIFT) - 1U);
824 RTGCPTR GCPtrTagGlob = GCPtrTag | pTlb->uTlbRevisionGlobal;
825 GCPtrTag |= pTlb->uTlbRevision;
826
827# if IEMTLB_ENTRY_COUNT >= 1024
828 idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag);
829 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)0;
830 uintptr_t const idxEvenEnd = idxEven + 1024;
831# else
832 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
833 & ~(RTGCPTR)( (RT_BIT_64(10 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U)
834 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
835 uintptr_t const idxEvenEnd = IEMTLB_ENTRY_COUNT;
836# endif
837 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
838 {
839 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
840 {
841 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
842 {
843 pTlb->aEntries[idxEven].uTag = 0;
844 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
845 pVCpu->iem.s.cbInstrBufTotal = 0;
846 fMaybeLargePage = true;
847 }
848 else
849 {
850 Assert(fMaybeLargePage == -1);
851 break;
852 }
853 }
854 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
855 {
856 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
857 {
858 pTlb->aEntries[idxEven + 1].uTag = 0;
859 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
860 pVCpu->iem.s.cbInstrBufTotal = 0;
861 fMaybeLargePage = true;
862 }
863 else
864 {
865 Assert(fMaybeLargePage == -1);
866 break;
867 }
868 }
869 GCPtrTag++;
870 GCPtrTagGlob++;
871 }
872 }
873 }
874}
875#endif
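/*
 * Worked numbers for the large-page scan above: the tag is the linear page
 * number (linear >> GUEST_PAGE_SHIFT), so a 2MB page under PAE paging spans
 * 512 consecutive tags starting at (GCPtr & ~(RTGCPTR)0x1fffff) >> GUEST_PAGE_SHIFT,
 * while a legacy 4MB PSE page spans 1024 tags. Both the non-global (even slot,
 * uTlbRevision) and global (odd slot, uTlbRevisionGlobal) variants of each of
 * those tags have to be evicted, which is what the loops above do; the #if on
 * IEMTLB_ENTRY_COUNT merely selects between an exact tag compare and a masked
 * compare for TLBs with fewer entry pairs than the page has 4KB sub-pages.
 */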
876
877
878/**
879 * Invalidates a page in the TLBs.
880 *
881 * @param pVCpu The cross context virtual CPU structure of the calling
882 * thread.
883 * @param GCPtr The address of the page to invalidate
884 * @thread EMT(pVCpu)
885 */
886VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
887{
888#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
889 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
890 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
891 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
892 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
893
894# ifdef IEM_WITH_CODE_TLB
895 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
896# endif
897# ifdef IEM_WITH_DATA_TLB
898 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
899# endif
900#else
901 NOREF(pVCpu); NOREF(GCPtr);
902#endif
903}
904
905
906#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
907/**
908 * Invalidates both TLBs the slow way following a physical revision rollover.
909 *
910 * Worker for IEMTlbInvalidateAllPhysical,
911 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
912 * iemMemMapJmp and others.
913 *
914 * @thread EMT(pVCpu)
915 */
916static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
917{
918 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
919 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
920 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
921
922 unsigned i;
923# ifdef IEM_WITH_CODE_TLB
924 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
925 while (i-- > 0)
926 {
927 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
928 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
929 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
930 }
931 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
932 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
933# endif
934# ifdef IEM_WITH_DATA_TLB
935 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
936 while (i-- > 0)
937 {
938 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
939 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
940 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
941 }
942 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
943 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
944# endif
945
946}
947#endif
948
949
950/**
951 * Invalidates the host physical aspects of the IEM TLBs.
952 *
953 * This is called internally as well as by PGM when moving GC mappings.
954 *
955 * @param pVCpu The cross context virtual CPU structure of the calling
956 * thread.
957 * @note Currently not used.
958 */
959VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
960{
961#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
962 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
963 Log10(("IEMTlbInvalidateAllPhysical\n"));
964
965# ifdef IEM_WITH_CODE_TLB
966 pVCpu->iem.s.cbInstrBufTotal = 0;
967# endif
968 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
969 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
970 {
971 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
972 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
973 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
974 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
975 }
976 else
977 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
978#else
979 NOREF(pVCpu);
980#endif
981}
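/*
 * The physical revision is the same trick applied to the host-mapping half of
 * each TLB entry: the IEMTLBE_F_PHYS_REV bits kept in fFlagsAndPhysRev must
 * equal the TLB's current uTlbPhysRev for the cached GCPhys/pbMappingR3 data
 * to be trusted. A sketch of the consumer-side check (cf. the code TLB fetch
 * code further down):
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev)
 *      {   // cached physical info still valid
 *      }
 *      else
 *      {   // stale: re-query PGM (PGMPhysIemGCPhys2PtrNoLock) and refresh the entry
 *      }
 *
 * Bumping uTlbPhysRev therefore invalidates every cached host mapping in one
 * step, and the slow path above only runs when the counter is about to wrap.
 */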
982
983
984/**
985 * Invalidates the host physical aspects of the IEM TLBs for all VCPUs.
986 *
987 * This is called internally as well as by PGM when moving GC mappings.
988 *
989 * @param pVM The cross context VM structure.
990 * @param idCpuCaller The ID of the calling EMT if available to the caller,
991 * otherwise NIL_VMCPUID.
992 * @param enmReason The reason we're called.
993 *
994 * @remarks Caller holds the PGM lock.
995 */
996VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
997{
998#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
999 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1000 if (pVCpuCaller)
1001 VMCPU_ASSERT_EMT(pVCpuCaller);
1002 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1003
1004 VMCC_FOR_EACH_VMCPU(pVM)
1005 {
1006# ifdef IEM_WITH_CODE_TLB
1007 if (pVCpuCaller == pVCpu)
1008 pVCpu->iem.s.cbInstrBufTotal = 0;
1009# endif
1010
1011 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1012 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1013 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1014 { /* likely */}
1015 else if (pVCpuCaller != pVCpu)
1016 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1017 else
1018 {
1019 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1020 continue;
1021 }
1022 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1023 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1024
1025 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1026 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1027 }
1028 VMCC_FOR_EACH_VMCPU_END(pVM);
1029
1030#else
1031 RT_NOREF(pVM, idCpuCaller, enmReason);
1032#endif
1033}
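/*
 * A note on the lock-free update above: for remote VCPUs the new physical
 * revision is installed with ASMAtomicCmpXchgU64 against the value sampled a
 * moment earlier, so if the remote EMT bumps uTlbPhysRev itself in the
 * meantime the compare-exchange simply fails and nothing is lost - that EMT
 * has already performed an at least as thorough invalidation. Only the calling
 * EMT ever takes the IEMTlbInvalidateAllPhysicalSlow rollover path, since only
 * it can safely walk its own TLB arrays.
 */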
1034
1035
1036/**
1037 * Flushes the prefetch buffer, light version.
1038 */
1039void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1040{
1041#ifndef IEM_WITH_CODE_TLB
1042 pVCpu->iem.s.cbOpcode = cbInstr;
1043#else
1044 RT_NOREF(pVCpu, cbInstr);
1045#endif
1046}
1047
1048
1049/**
1050 * Flushes the prefetch buffer, heavy version.
1051 */
1052void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1053{
1054#ifndef IEM_WITH_CODE_TLB
1055 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1056#elif 1
1057 pVCpu->iem.s.cbInstrBufTotal = 0;
1058 RT_NOREF(cbInstr);
1059#else
1060 RT_NOREF(pVCpu, cbInstr);
1061#endif
1062}
1063
1064
1065
1066#ifdef IEM_WITH_CODE_TLB
1067
1068/**
1069 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1070 * failure and jumps.
1071 *
1072 * We end up here for a number of reasons:
1073 * - pbInstrBuf isn't yet initialized.
1074 * - Advancing beyond the buffer boundary (e.g. cross page).
1075 * - Advancing beyond the CS segment limit.
1076 * - Fetching from non-mappable page (e.g. MMIO).
1077 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1078 *
1079 * @param pVCpu The cross context virtual CPU structure of the
1080 * calling thread.
1081 * @param pvDst Where to return the bytes.
1082 * @param cbDst Number of bytes to read. A value of zero is
1083 * allowed for initializing pbInstrBuf (the
1084 * recompiler does this). In this case it is best
1085 * to set pbInstrBuf to NULL prior to the call.
1086 */
1087void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1088{
1089# ifdef IN_RING3
1090 for (;;)
1091 {
1092 Assert(cbDst <= 8);
1093 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1094
1095 /*
1096 * We might have a partial buffer match, deal with that first to make the
1097 * rest simpler. This is the first part of the cross page/buffer case.
1098 */
1099 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1100 if (pbInstrBuf != NULL)
1101 {
1102 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1103 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1104 if (offBuf < cbInstrBuf)
1105 {
1106 Assert(offBuf + cbDst > cbInstrBuf);
1107 uint32_t const cbCopy = cbInstrBuf - offBuf;
1108 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1109
1110 cbDst -= cbCopy;
1111 pvDst = (uint8_t *)pvDst + cbCopy;
1112 offBuf += cbCopy;
1113 }
1114 }
1115
1116 /*
1117 * Check segment limit, figuring how much we're allowed to access at this point.
1118 *
1119 * We will fault immediately if RIP is past the segment limit / in non-canonical
1120 * territory. If we do continue, there are one or more bytes to read before we
1121 * end up in trouble and we need to do that first before faulting.
1122 */
1123 RTGCPTR GCPtrFirst;
1124 uint32_t cbMaxRead;
1125 if (IEM_IS_64BIT_CODE(pVCpu))
1126 {
1127 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1128 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1129 { /* likely */ }
1130 else
1131 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1132 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1133 }
1134 else
1135 {
1136 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1137 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1138 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1139 { /* likely */ }
1140 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1141 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1142 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1143 if (cbMaxRead != 0)
1144 { /* likely */ }
1145 else
1146 {
1147 /* Overflowed because address is 0 and limit is max. */
1148 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1149 cbMaxRead = X86_PAGE_SIZE;
1150 }
1151 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1152 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1153 if (cbMaxRead2 < cbMaxRead)
1154 cbMaxRead = cbMaxRead2;
1155 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1156 }
1157
1158 /*
1159 * Get the TLB entry for this piece of code.
1160 */
1161 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1162 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1163 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1164 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1165 {
1166 /* likely when executing lots of code, otherwise unlikely */
1167# ifdef IEM_WITH_TLB_STATISTICS
1168 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1169# endif
1170 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1171
1172 /* Check TLB page table level access flags. */
1173 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1174 {
1175 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1176 {
1177 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1178 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1179 }
1180 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1181 {
1182 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1183 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1184 }
1185 }
1186
1187 /* Look up the physical page info if necessary. */
1188 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1189 { /* not necessary */ }
1190 else
1191 {
1192 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1193 { /* likely */ }
1194 else
1195 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1196 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1197 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1198 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1199 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1200 }
1201 }
1202 else
1203 {
1204 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1205
1206 /* This page table walking will set A bits as required by the access while performing the walk.
1207 ASSUMES these are set when the address is translated rather than on commit... */
1208 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1209 PGMPTWALKFAST WalkFast;
1210 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1211 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1212 &WalkFast);
1213 if (RT_SUCCESS(rc))
1214 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1215 else
1216 {
1217#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1218 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1219 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1220#endif
1221 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1222 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1223 }
1224
1225 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1226 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1227 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1228 {
1229 pTlbe--;
1230 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1231 }
1232 else
1233 {
1234 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1235 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1236 }
1237 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1238 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1239 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1240 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1241 pTlbe->GCPhys = GCPhysPg;
1242 pTlbe->pbMappingR3 = NULL;
1243 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1244 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1245 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1246
1247 /* Resolve the physical address. */
1248 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1249 { /* likely */ }
1250 else
1251 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1252 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1253 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1254 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1255 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1256 }
1257
1258# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1259 /*
1260 * Try do a direct read using the pbMappingR3 pointer.
1261 * Note! Do not recheck the physical TLB revision number here as we have the
1262 * wrong response to changes in the else case. If someone is updating
1263 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1264 * pretending we always won the race.
1265 */
1266 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1267 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1268 {
1269 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1270 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1271 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1272 {
1273 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1274 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1275 }
1276 else
1277 {
1278 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1279 if (cbInstr + (uint32_t)cbDst <= 15)
1280 {
1281 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1282 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1283 }
1284 else
1285 {
1286 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1287 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1288 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1289 }
1290 }
1291 if (cbDst <= cbMaxRead)
1292 {
1293 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1294 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1295
1296 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1297 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1298 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1299 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1300 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1301 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1302 else
1303 Assert(!pvDst);
1304 return;
1305 }
1306 pVCpu->iem.s.pbInstrBuf = NULL;
1307
1308 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1309 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1310 }
1311# else
1312# error "refactor as needed"
1313 /*
1314 * If there is no special read handling, we can read a bit more and
1315 * put it in the prefetch buffer.
1316 */
1317 if ( cbDst < cbMaxRead
1318 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1319 {
1320 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1321 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1322 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1323 { /* likely */ }
1324 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1325 {
1326 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1327 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1328 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1329 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1330 }
1331 else
1332 {
1333 Log((RT_SUCCESS(rcStrict)
1334 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1335 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1336 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1337 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1338 }
1339 }
1340# endif
1341 /*
1342 * Special read handling, so only read exactly what's needed.
1343 * This is a highly unlikely scenario.
1344 */
1345 else
1346 {
1347 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1348
1349 /* Check instruction length. */
1350 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1351 if (RT_LIKELY(cbInstr + cbDst <= 15))
1352 { /* likely */ }
1353 else
1354 {
1355 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1356 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1357 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1358 }
1359
1360 /* Do the reading. */
1361 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1362 if (cbToRead > 0)
1363 {
1364 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1365 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1366 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1367 { /* likely */ }
1368 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1369 {
1370 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1371 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1372 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1373 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1374 }
1375 else
1376 {
1377 Log((RT_SUCCESS(rcStrict)
1378 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1379 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1380 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1381 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1382 }
1383 }
1384
1385 /* Update the state and probably return. */
1386 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1387 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1388 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1389
1390 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1391 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1392 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1393 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1394 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1395 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1396 pVCpu->iem.s.pbInstrBuf = NULL;
1397 if (cbToRead == cbDst)
1398 return;
1399 Assert(cbToRead == cbMaxRead);
1400 }
1401
1402 /*
1403 * More to read, loop.
1404 */
1405 cbDst -= cbMaxRead;
1406 pvDst = (uint8_t *)pvDst + cbMaxRead;
1407 }
1408# else /* !IN_RING3 */
1409 RT_NOREF(pvDst, cbDst);
1410 if (pvDst || cbDst)
1411 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1412# endif /* !IN_RING3 */
1413}
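/*
 * A hedged worked example of the bookkeeping in iemOpcodeFetchBytesJmp for an
 * instruction crossing a page: say a 5-byte instruction starts 2 bytes before
 * the end of the mapped page. The partial-match step at the top copies those
 * 2 bytes out of the old pbInstrBuf, then the code TLB is consulted for the
 * following page and the remaining 3 bytes are read from its mapping. On the
 * direct-read path this leaves roughly:
 *
 *      offPg            = 0     // the remaining bytes start at the new page boundary
 *      cbInstr          = 2     // bytes of this instruction already consumed
 *      offCurInstrStart = -2    // page-relative start of the instruction (int16_t)
 *      offInstrNextByte = 3     // next unread byte within the new buffer window
 *
 * so offBuf - offCurInstrStart keeps yielding the running instruction length
 * for the 15-byte limit check even though the buffer window changed underneath.
 */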
1414
1415#else /* !IEM_WITH_CODE_TLB */
1416
1417/**
1418 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1419 * exception if it fails.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the
1423 * calling thread.
1424 * @param cbMin The minimum number of bytes relative to offOpcode
1425 * that must be read.
1426 */
1427VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1428{
1429 /*
1430 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1431 *
1432 * First translate CS:rIP to a physical address.
1433 */
1434 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1435 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1436 uint8_t const cbLeft = cbOpcode - offOpcode;
1437 Assert(cbLeft < cbMin);
1438 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1439
1440 uint32_t cbToTryRead;
1441 RTGCPTR GCPtrNext;
1442 if (IEM_IS_64BIT_CODE(pVCpu))
1443 {
1444 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1445 if (!IEM_IS_CANONICAL(GCPtrNext))
1446 return iemRaiseGeneralProtectionFault0(pVCpu);
1447 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1448 }
1449 else
1450 {
1451 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1452 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1453 GCPtrNext32 += cbOpcode;
1454 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1455 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1456 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1457 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1458 if (!cbToTryRead) /* overflowed */
1459 {
1460 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1461 cbToTryRead = UINT32_MAX;
1462 /** @todo check out wrapping around the code segment. */
1463 }
1464 if (cbToTryRead < cbMin - cbLeft)
1465 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1466 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1467
1468 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1469 if (cbToTryRead > cbLeftOnPage)
1470 cbToTryRead = cbLeftOnPage;
1471 }
1472
1473 /* Restrict to opcode buffer space.
1474
1475 We're making ASSUMPTIONS here based on work done previously in
1476 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1477 be fetched in case of an instruction crossing two pages. */
1478 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1479 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1480 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1481 { /* likely */ }
1482 else
1483 {
1484 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1485 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1486 return iemRaiseGeneralProtectionFault0(pVCpu);
1487 }
1488
1489 PGMPTWALKFAST WalkFast;
1490 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1491 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1492 &WalkFast);
1493 if (RT_SUCCESS(rc))
1494 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1495 else
1496 {
1497 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1498#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1499 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1500 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1501#endif
1502 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1503 }
1504 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1505 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1506
1507 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1508 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1509
1510 /*
1511 * Read the bytes at this address.
1512 *
1513 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1514 * and since PATM should only patch the start of an instruction there
1515 * should be no need to check again here.
1516 */
1517 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1518 {
1519 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1520 cbToTryRead, PGMACCESSORIGIN_IEM);
1521 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1522 { /* likely */ }
1523 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1524 {
1525 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1526 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1527 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1528 }
1529 else
1530 {
1531 Log((RT_SUCCESS(rcStrict)
1532 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1533 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1534 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1535 return rcStrict;
1536 }
1537 }
1538 else
1539 {
1540 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1541 if (RT_SUCCESS(rc))
1542 { /* likely */ }
1543 else
1544 {
1545 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1546 return rc;
1547 }
1548 }
1549 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1550 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1551
1552 return VINF_SUCCESS;
1553}
1554
1555#endif /* !IEM_WITH_CODE_TLB */
1556#ifndef IEM_WITH_SETJMP
1557
1558/**
1559 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1560 *
1561 * @returns Strict VBox status code.
1562 * @param pVCpu The cross context virtual CPU structure of the
1563 * calling thread.
1564 * @param pb Where to return the opcode byte.
1565 */
1566VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1567{
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1573 pVCpu->iem.s.offOpcode = offOpcode + 1;
1574 }
1575 else
1576 *pb = 0;
1577 return rcStrict;
1578}
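
/* Purely illustrative sketch (not part of this file): the inline fast path in
   the IEM headers is assumed to look roughly like the following, deferring to
   iemOpcodeGetNextU8Slow() above only when the buffered opcode bytes run out:

       DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
       {
           uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
           if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
           {
               pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
               *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
               return VINF_SUCCESS;
           }
           return iemOpcodeGetNextU8Slow(pVCpu, pu8);
       }
*/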
1579
1580#else /* IEM_WITH_SETJMP */
1581
1582/**
1583 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1584 *
1585 * @returns The opcode byte.
1586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1587 */
1588uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1589{
1590# ifdef IEM_WITH_CODE_TLB
1591 uint8_t u8;
1592 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1593 return u8;
1594# else
1595 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1596 if (rcStrict == VINF_SUCCESS)
1597 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1598 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1599# endif
1600}
1601
1602#endif /* IEM_WITH_SETJMP */
1603
1604#ifndef IEM_WITH_SETJMP
1605
1606/**
1607 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1608 *
1609 * @returns Strict VBox status code.
1610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1611 * @param pu16 Where to return the opcode word.
1612 */
1613VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1614{
1615 uint8_t u8;
1616 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1617 if (rcStrict == VINF_SUCCESS)
1618 *pu16 = (int8_t)u8;
1619 return rcStrict;
1620}
1621
1622
1623/**
1624 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1625 *
1626 * @returns Strict VBox status code.
1627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1628 * @param pu32 Where to return the opcode dword.
1629 */
1630VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1631{
1632 uint8_t u8;
1633 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1634 if (rcStrict == VINF_SUCCESS)
1635 *pu32 = (int8_t)u8;
1636 return rcStrict;
1637}
1638
1639
1640/**
1641 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1642 *
1643 * @returns Strict VBox status code.
1644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1645 * @param pu64 Where to return the opcode qword.
1646 */
1647VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1648{
1649 uint8_t u8;
1650 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1651 if (rcStrict == VINF_SUCCESS)
1652 *pu64 = (int8_t)u8;
1653 return rcStrict;
1654}
1655
1656#endif /* !IEM_WITH_SETJMP */
1657
1658
1659#ifndef IEM_WITH_SETJMP
1660
1661/**
1662 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1663 *
1664 * @returns Strict VBox status code.
1665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1666 * @param pu16 Where to return the opcode word.
1667 */
1668VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1669{
1670 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1671 if (rcStrict == VINF_SUCCESS)
1672 {
1673 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1674# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1675 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1676# else
1677 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1678# endif
1679 pVCpu->iem.s.offOpcode = offOpcode + 2;
1680 }
1681 else
1682 *pu16 = 0;
1683 return rcStrict;
1684}
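
/* Note: the byte-wise RT_MAKE_U16 composition above is alignment and endian
   neutral, whereas the IEM_USE_UNALIGNED_DATA_ACCESS variant relies on the
   host tolerating a misaligned little-endian load from the opcode buffer. */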
1685
1686#else /* IEM_WITH_SETJMP */
1687
1688/**
1689 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1690 *
1691 * @returns The opcode word.
1692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1693 */
1694uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1695{
1696# ifdef IEM_WITH_CODE_TLB
1697 uint16_t u16;
1698 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1699 return u16;
1700# else
1701 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1702 if (rcStrict == VINF_SUCCESS)
1703 {
1704 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1705 pVCpu->iem.s.offOpcode += 2;
1706# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1707 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1708# else
1709 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1710# endif
1711 }
1712 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1713# endif
1714}
1715
1716#endif /* IEM_WITH_SETJMP */
1717
1718#ifndef IEM_WITH_SETJMP
1719
1720/**
1721 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1722 *
1723 * @returns Strict VBox status code.
1724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1725 * @param pu32 Where to return the opcode double word.
1726 */
1727VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1728{
1729 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1730 if (rcStrict == VINF_SUCCESS)
1731 {
1732 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1733 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1734 pVCpu->iem.s.offOpcode = offOpcode + 2;
1735 }
1736 else
1737 *pu32 = 0;
1738 return rcStrict;
1739}
1740
1741
1742/**
1743 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1744 *
1745 * @returns Strict VBox status code.
1746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1747 * @param pu64 Where to return the opcode quad word.
1748 */
1749VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1750{
1751 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1752 if (rcStrict == VINF_SUCCESS)
1753 {
1754 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1755 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1756 pVCpu->iem.s.offOpcode = offOpcode + 2;
1757 }
1758 else
1759 *pu64 = 0;
1760 return rcStrict;
1761}
1762
1763#endif /* !IEM_WITH_SETJMP */
1764
1765#ifndef IEM_WITH_SETJMP
1766
1767/**
1768 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1769 *
1770 * @returns Strict VBox status code.
1771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1772 * @param pu32 Where to return the opcode dword.
1773 */
1774VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1775{
1776 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1777 if (rcStrict == VINF_SUCCESS)
1778 {
1779 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1780# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1781 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1782# else
1783 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1784 pVCpu->iem.s.abOpcode[offOpcode + 1],
1785 pVCpu->iem.s.abOpcode[offOpcode + 2],
1786 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1787# endif
1788 pVCpu->iem.s.offOpcode = offOpcode + 4;
1789 }
1790 else
1791 *pu32 = 0;
1792 return rcStrict;
1793}
1794
1795#else /* IEM_WITH_SETJMP */
1796
1797/**
1798 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1799 *
1800 * @returns The opcode dword.
1801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1802 */
1803uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1804{
1805# ifdef IEM_WITH_CODE_TLB
1806 uint32_t u32;
1807 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1808 return u32;
1809# else
1810 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1811 if (rcStrict == VINF_SUCCESS)
1812 {
1813 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1814 pVCpu->iem.s.offOpcode = offOpcode + 4;
1815# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1816 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1817# else
1818 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1819 pVCpu->iem.s.abOpcode[offOpcode + 1],
1820 pVCpu->iem.s.abOpcode[offOpcode + 2],
1821 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1822# endif
1823 }
1824 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1825# endif
1826}
1827
1828#endif /* IEM_WITH_SETJMP */
1829
1830#ifndef IEM_WITH_SETJMP
1831
1832/**
1833 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1834 *
1835 * @returns Strict VBox status code.
1836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1837 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1838 */
1839VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1840{
1841 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1842 if (rcStrict == VINF_SUCCESS)
1843 {
1844 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1845 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1846 pVCpu->iem.s.abOpcode[offOpcode + 1],
1847 pVCpu->iem.s.abOpcode[offOpcode + 2],
1848 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1849 pVCpu->iem.s.offOpcode = offOpcode + 4;
1850 }
1851 else
1852 *pu64 = 0;
1853 return rcStrict;
1854}
1855
1856
1857/**
1858 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1859 *
1860 * @returns Strict VBox status code.
1861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1862 * @param pu64 Where to return the opcode qword.
1863 */
1864VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1865{
1866 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1867 if (rcStrict == VINF_SUCCESS)
1868 {
1869 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1870 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1871 pVCpu->iem.s.abOpcode[offOpcode + 1],
1872 pVCpu->iem.s.abOpcode[offOpcode + 2],
1873 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1874 pVCpu->iem.s.offOpcode = offOpcode + 4;
1875 }
1876 else
1877 *pu64 = 0;
1878 return rcStrict;
1879}
1880
1881#endif /* !IEM_WITH_SETJMP */
1882
1883#ifndef IEM_WITH_SETJMP
1884
1885/**
1886 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1887 *
1888 * @returns Strict VBox status code.
1889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1890 * @param pu64 Where to return the opcode qword.
1891 */
1892VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1893{
1894 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1895 if (rcStrict == VINF_SUCCESS)
1896 {
1897 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1898# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1899 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1900# else
1901 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1902 pVCpu->iem.s.abOpcode[offOpcode + 1],
1903 pVCpu->iem.s.abOpcode[offOpcode + 2],
1904 pVCpu->iem.s.abOpcode[offOpcode + 3],
1905 pVCpu->iem.s.abOpcode[offOpcode + 4],
1906 pVCpu->iem.s.abOpcode[offOpcode + 5],
1907 pVCpu->iem.s.abOpcode[offOpcode + 6],
1908 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1909# endif
1910 pVCpu->iem.s.offOpcode = offOpcode + 8;
1911 }
1912 else
1913 *pu64 = 0;
1914 return rcStrict;
1915}
1916
1917#else /* IEM_WITH_SETJMP */
1918
1919/**
1920 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1921 *
1922 * @returns The opcode qword.
1923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1924 */
1925uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1926{
1927# ifdef IEM_WITH_CODE_TLB
1928 uint64_t u64;
1929 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1930 return u64;
1931# else
1932 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1933 if (rcStrict == VINF_SUCCESS)
1934 {
1935 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1936 pVCpu->iem.s.offOpcode = offOpcode + 8;
1937# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1938 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1939# else
1940 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1941 pVCpu->iem.s.abOpcode[offOpcode + 1],
1942 pVCpu->iem.s.abOpcode[offOpcode + 2],
1943 pVCpu->iem.s.abOpcode[offOpcode + 3],
1944 pVCpu->iem.s.abOpcode[offOpcode + 4],
1945 pVCpu->iem.s.abOpcode[offOpcode + 5],
1946 pVCpu->iem.s.abOpcode[offOpcode + 6],
1947 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1948# endif
1949 }
1950 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1951# endif
1952}
1953
1954#endif /* IEM_WITH_SETJMP */
1955
1956
1957
1958/** @name Misc Worker Functions.
1959 * @{
1960 */
1961
1962/**
1963 * Gets the exception class for the specified exception vector.
1964 *
1965 * @returns The class of the specified exception.
1966 * @param uVector The exception vector.
1967 */
1968static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1969{
1970 Assert(uVector <= X86_XCPT_LAST);
1971 switch (uVector)
1972 {
1973 case X86_XCPT_DE:
1974 case X86_XCPT_TS:
1975 case X86_XCPT_NP:
1976 case X86_XCPT_SS:
1977 case X86_XCPT_GP:
1978 case X86_XCPT_SX: /* AMD only */
1979 return IEMXCPTCLASS_CONTRIBUTORY;
1980
1981 case X86_XCPT_PF:
1982 case X86_XCPT_VE: /* Intel only */
1983 return IEMXCPTCLASS_PAGE_FAULT;
1984
1985 case X86_XCPT_DF:
1986 return IEMXCPTCLASS_DOUBLE_FAULT;
1987 }
1988 return IEMXCPTCLASS_BENIGN;
1989}
1990
1991
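/*
 * Summary of how IEMEvaluateRecursiveXcpt (below) combines a prior CPU
 * exception with a second one raised while delivering it:
 *
 *      1st \ 2nd     |  benign   | contributory | page fault
 *      --------------+-----------+--------------+--------------
 *      benign        | raise 2nd |  raise 2nd   |  raise 2nd
 *      contributory  | raise 2nd |     #DF      |  raise 2nd
 *      page fault    | raise 2nd |     #DF      |     #DF
 *      double fault  | raise 2nd | triple fault | triple fault
 *
 * NMI and recursive #AC delivery get additional special treatment, see below.
 */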
1992/**
1993 * Evaluates how to handle an exception caused during delivery of another event
1994 * (exception / interrupt).
1995 *
1996 * @returns How to handle the recursive exception.
1997 * @param pVCpu The cross context virtual CPU structure of the
1998 * calling thread.
1999 * @param fPrevFlags The flags of the previous event.
2000 * @param uPrevVector The vector of the previous event.
2001 * @param fCurFlags The flags of the current exception.
2002 * @param uCurVector The vector of the current exception.
2003 * @param pfXcptRaiseInfo Where to store additional information about the
2004 * exception condition. Optional.
2005 */
2006VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2007 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2008{
2009 /*
2010 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
2011 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
2012 */
2013 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2014 Assert(pVCpu); RT_NOREF(pVCpu);
2015 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2016
2017 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2018 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2019 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2020 {
2021 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2022 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2023 {
2024 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2025 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2026 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2027 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2028 {
2029 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2030 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2031 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2032 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2033 uCurVector, pVCpu->cpum.GstCtx.cr2));
2034 }
2035 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2036 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2037 {
2038 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2039 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2040 }
2041 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2042 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2043 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2044 {
2045 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2046 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2047 }
2048 }
2049 else
2050 {
2051 if (uPrevVector == X86_XCPT_NMI)
2052 {
2053 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2054 if (uCurVector == X86_XCPT_PF)
2055 {
2056 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2057 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2058 }
2059 }
2060 else if ( uPrevVector == X86_XCPT_AC
2061 && uCurVector == X86_XCPT_AC)
2062 {
2063 enmRaise = IEMXCPTRAISE_CPU_HANG;
2064 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2065 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2066 }
2067 }
2068 }
2069 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2070 {
2071 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2072 if (uCurVector == X86_XCPT_PF)
2073 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2074 }
2075 else
2076 {
2077 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2078 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2079 }
2080
2081 if (pfXcptRaiseInfo)
2082 *pfXcptRaiseInfo = fRaiseInfo;
2083 return enmRaise;
2084}
2085
2086
2087/**
2088 * Enters the CPU shutdown state initiated by a triple fault or other
2089 * unrecoverable conditions.
2090 *
2091 * @returns Strict VBox status code.
2092 * @param pVCpu The cross context virtual CPU structure of the
2093 * calling thread.
2094 */
2095static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2096{
2097 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2098 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2099
2100 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2101 {
2102 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2103 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2104 }
2105
2106 RT_NOREF(pVCpu);
2107 return VINF_EM_TRIPLE_FAULT;
2108}
2109
2110
2111/**
2112 * Validates a new SS segment.
2113 *
2114 * @returns VBox strict status code.
2115 * @param pVCpu The cross context virtual CPU structure of the
2116 * calling thread.
2117 * @param NewSS The new SS selector.
2118 * @param uCpl The CPL to load the stack for.
2119 * @param pDesc Where to return the descriptor.
2120 */
2121static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2122{
2123 /* Null selectors are not allowed (we're not called for dispatching
2124 interrupts with SS=0 in long mode). */
2125 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2126 {
2127 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2128 return iemRaiseTaskSwitchFault0(pVCpu);
2129 }
2130
2131 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2132 if ((NewSS & X86_SEL_RPL) != uCpl)
2133 {
2134 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2135 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2136 }
2137
2138 /*
2139 * Read the descriptor.
2140 */
2141 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2142 if (rcStrict != VINF_SUCCESS)
2143 return rcStrict;
2144
2145 /*
2146 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2147 */
2148 if (!pDesc->Legacy.Gen.u1DescType)
2149 {
2150 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2151 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2152 }
2153
2154 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2155 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2156 {
2157 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2158 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2159 }
2160 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2161 {
2162 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2163 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2164 }
2165
2166 /* Is it there? */
2167 /** @todo testcase: Is this checked before the canonical / limit check below? */
2168 if (!pDesc->Legacy.Gen.u1Present)
2169 {
2170 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2171 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2172 }
2173
2174 return VINF_SUCCESS;
2175}
2176
2177/** @} */
2178
2179
2180/** @name Raising Exceptions.
2181 *
2182 * @{
2183 */
2184
2185
2186/**
2187 * Loads the specified stack far pointer from the TSS.
2188 *
2189 * @returns VBox strict status code.
2190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2191 * @param uCpl The CPL to load the stack for.
2192 * @param pSelSS Where to return the new stack segment.
2193 * @param puEsp Where to return the new stack pointer.
2194 */
2195static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2196{
2197 VBOXSTRICTRC rcStrict;
2198 Assert(uCpl < 4);
2199
2200 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2201 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2202 {
2203 /*
2204 * 16-bit TSS (X86TSS16).
2205 */
2206 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2207 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2208 {
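            /* The 16-bit TSS stores the ring stacks as SP:SS pairs of 2 bytes
               each starting at offset 2, hence 4 bytes per CPL. */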
2209 uint32_t off = uCpl * 4 + 2;
2210 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2211 {
2212 /** @todo check actual access pattern here. */
2213 uint32_t u32Tmp = 0; /* gcc maybe... */
2214 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2215 if (rcStrict == VINF_SUCCESS)
2216 {
2217 *puEsp = RT_LOWORD(u32Tmp);
2218 *pSelSS = RT_HIWORD(u32Tmp);
2219 return VINF_SUCCESS;
2220 }
2221 }
2222 else
2223 {
2224 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2225 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2226 }
2227 break;
2228 }
2229
2230 /*
2231 * 32-bit TSS (X86TSS32).
2232 */
2233 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2234 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2235 {
2236 uint32_t off = uCpl * 8 + 4;
2237 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2238 {
2239/** @todo check actual access pattern here. */
2240 uint64_t u64Tmp;
2241 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2242 if (rcStrict == VINF_SUCCESS)
2243 {
2244 *puEsp = u64Tmp & UINT32_MAX;
2245 *pSelSS = (RTSEL)(u64Tmp >> 32);
2246 return VINF_SUCCESS;
2247 }
2248 }
2249 else
2250 {
2251 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2252 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2253 }
2254 break;
2255 }
2256
2257 default:
2258 AssertFailed();
2259 rcStrict = VERR_IEM_IPE_4;
2260 break;
2261 }
2262
2263 *puEsp = 0; /* make gcc happy */
2264 *pSelSS = 0; /* make gcc happy */
2265 return rcStrict;
2266}
2267
2268
2269/**
2270 * Loads the specified stack pointer from the 64-bit TSS.
2271 *
2272 * @returns VBox strict status code.
2273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2274 * @param uCpl The CPL to load the stack for.
2275 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2276 * @param puRsp Where to return the new stack pointer.
2277 */
2278static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2279{
2280 Assert(uCpl < 4);
2281 Assert(uIst < 8);
2282 *puRsp = 0; /* make gcc happy */
2283
2284 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2285 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2286
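    /* In the 64-bit TSS both the RSP0..RSP2 fields and the IST1..IST7 fields
       are contiguous runs of 64-bit entries, so a simple offset calculation
       from the first member of the respective run suffices. */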
2287 uint32_t off;
2288 if (uIst)
2289 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2290 else
2291 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2292 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2293 {
2294 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2295 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2296 }
2297
2298 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2299}
2300
2301
2302/**
2303 * Adjusts the CPU state according to the exception being raised.
2304 *
2305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2306 * @param u8Vector The exception that has been raised.
2307 */
2308DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2309{
2310 switch (u8Vector)
2311 {
2312 case X86_XCPT_DB:
2313 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2314 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2315 break;
2316 /** @todo Read the AMD and Intel exception reference... */
2317 }
2318}
2319
2320
2321/**
2322 * Implements exceptions and interrupts for real mode.
2323 *
2324 * @returns VBox strict status code.
2325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2326 * @param cbInstr The number of bytes to offset rIP by in the return
2327 * address.
2328 * @param u8Vector The interrupt / exception vector number.
2329 * @param fFlags The flags.
2330 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2331 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2332 */
2333static VBOXSTRICTRC
2334iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2335 uint8_t cbInstr,
2336 uint8_t u8Vector,
2337 uint32_t fFlags,
2338 uint16_t uErr,
2339 uint64_t uCr2) RT_NOEXCEPT
2340{
2341 NOREF(uErr); NOREF(uCr2);
2342 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2343
2344 /*
2345 * Read the IDT entry.
2346 */
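    /* In real mode the IDT is the classic interrupt vector table: 256 entries
       of 4 bytes each, a 16-bit offset followed by a 16-bit segment, which is
       what the 4 * u8Vector indexing below relies on. */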
2347 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2348 {
2349 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2350 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2351 }
2352 RTFAR16 Idte;
2353 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2354 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2355 {
2356 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2357 return rcStrict;
2358 }
2359
2360#ifdef LOG_ENABLED
2361 /* If software interrupt, try decode it if logging is enabled and such. */
2362 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2363 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2364 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2365#endif
2366
2367 /*
2368 * Push the stack frame.
2369 */
2370 uint8_t bUnmapInfo;
2371 uint16_t *pu16Frame;
2372 uint64_t uNewRsp;
2373 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2374 if (rcStrict != VINF_SUCCESS)
2375 return rcStrict;
2376
2377 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2378#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2379 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2380 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2381 fEfl |= UINT16_C(0xf000);
2382#endif
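    /* On CPUs up to and including the 80186 the upper four FLAGS bits read as
       set, which the 0xf000 OR above reproduces for the pushed FLAGS image. */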
2383 pu16Frame[2] = (uint16_t)fEfl;
2384 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2385 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2386 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2387 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2388 return rcStrict;
2389
2390 /*
2391 * Load the vector address into cs:ip and make exception specific state
2392 * adjustments.
2393 */
2394 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2395 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2396 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2398 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2399 pVCpu->cpum.GstCtx.rip = Idte.off;
2400 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2401 IEMMISC_SET_EFL(pVCpu, fEfl);
2402
2403 /** @todo do we actually do this in real mode? */
2404 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2405 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2406
2407 /*
2408 * Deal with debug events that follow the exception and clear inhibit flags.
2409 */
2410 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2411 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2412 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2413 else
2414 {
2415 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2416 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2417 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2418 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2419 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2420 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2421 return iemRaiseDebugException(pVCpu);
2422 }
2423
2424 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2425 so best leave them alone in case we're in a weird kind of real mode... */
2426
2427 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2428}
2429
2430
2431/**
2432 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2433 *
2434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2435 * @param pSReg Pointer to the segment register.
2436 */
2437DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2438{
2439 pSReg->Sel = 0;
2440 pSReg->ValidSel = 0;
2441 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2442 {
2443 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2444 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2445 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2446 }
2447 else
2448 {
2449 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2450 /** @todo check this on AMD-V */
2451 pSReg->u64Base = 0;
2452 pSReg->u32Limit = 0;
2453 }
2454}
2455
2456
2457/**
2458 * Loads a segment selector during a task switch in V8086 mode.
2459 *
2460 * @param pSReg Pointer to the segment register.
2461 * @param uSel The selector value to load.
2462 */
2463DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2464{
2465 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2466 pSReg->Sel = uSel;
2467 pSReg->ValidSel = uSel;
2468 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2469 pSReg->u64Base = uSel << 4;
2470 pSReg->u32Limit = 0xffff;
2471 pSReg->Attr.u = 0xf3;
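    /* 0xf3: present, DPL=3, non-system, read/write accessed data segment; the
       fixed attribute value of a segment register in virtual-8086 mode. */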
2472}
2473
2474
2475/**
2476 * Loads a segment selector during a task switch in protected mode.
2477 *
2478 * In this task switch scenario, we would throw \#TS exceptions rather than
2479 * \#GPs.
2480 *
2481 * @returns VBox strict status code.
2482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2483 * @param pSReg Pointer to the segment register.
2484 * @param uSel The new selector value.
2485 *
2486 * @remarks This does _not_ handle CS or SS.
2487 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2488 */
2489static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2490{
2491 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2492
2493 /* Null data selector. */
2494 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2495 {
2496 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2497 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2498 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2499 return VINF_SUCCESS;
2500 }
2501
2502 /* Fetch the descriptor. */
2503 IEMSELDESC Desc;
2504 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2505 if (rcStrict != VINF_SUCCESS)
2506 {
2507 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2508 VBOXSTRICTRC_VAL(rcStrict)));
2509 return rcStrict;
2510 }
2511
2512 /* Must be a data segment or readable code segment. */
2513 if ( !Desc.Legacy.Gen.u1DescType
2514 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2515 {
2516 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2517 Desc.Legacy.Gen.u4Type));
2518 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2519 }
2520
2521 /* Check privileges for data segments and non-conforming code segments. */
2522 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2523 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2524 {
2525 /* The RPL and the new CPL must be less than or equal to the DPL. */
2526 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2527 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2528 {
2529 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2530 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2532 }
2533 }
2534
2535 /* Is it there? */
2536 if (!Desc.Legacy.Gen.u1Present)
2537 {
2538 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2539 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2540 }
2541
2542 /* The base and limit. */
2543 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2544 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2545
2546 /*
2547 * Ok, everything checked out fine. Now set the accessed bit before
2548 * committing the result into the registers.
2549 */
2550 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2551 {
2552 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2553 if (rcStrict != VINF_SUCCESS)
2554 return rcStrict;
2555 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2556 }
2557
2558 /* Commit */
2559 pSReg->Sel = uSel;
2560 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2561 pSReg->u32Limit = cbLimit;
2562 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2563 pSReg->ValidSel = uSel;
2564 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2565 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2566 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2567
2568 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2569 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2570 return VINF_SUCCESS;
2571}
2572
2573
2574/**
2575 * Performs a task switch.
2576 *
2577 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2578 * caller is responsible for performing the necessary checks (like DPL, TSS
2579 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2580 * reference for JMP, CALL, IRET.
2581 *
2582 * If the task switch is due to a software interrupt or hardware exception,
2583 * the caller is responsible for validating the TSS selector and descriptor. See
2584 * Intel Instruction reference for INT n.
2585 *
2586 * @returns VBox strict status code.
2587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2588 * @param enmTaskSwitch The cause of the task switch.
2589 * @param uNextEip The EIP effective after the task switch.
2590 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2591 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2592 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2593 * @param SelTss The TSS selector of the new task.
2594 * @param pNewDescTss Pointer to the new TSS descriptor.
2595 */
2596VBOXSTRICTRC
2597iemTaskSwitch(PVMCPUCC pVCpu,
2598 IEMTASKSWITCH enmTaskSwitch,
2599 uint32_t uNextEip,
2600 uint32_t fFlags,
2601 uint16_t uErr,
2602 uint64_t uCr2,
2603 RTSEL SelTss,
2604 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2605{
2606 Assert(!IEM_IS_REAL_MODE(pVCpu));
2607 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2608 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2609
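    /*
     * Rough order of operations below: validate the new TSS limit, honour any
     * VMX/SVM task switch intercepts, validate the current TSS limit, map the
     * new TSS, clear the outgoing task's busy bit for JMP/IRET, save the
     * outgoing register state into the current TSS, link back and mark the new
     * TSS busy for CALL/INT/JMP as appropriate, then load TR, CR3, LDTR and
     * the segment registers from the new TSS with the usual checks.
     */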
2610 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2611 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2612 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2613 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2614 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2615
2616 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2617 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2618
2619 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2620 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2621
2622 /* Update CR2 in case it's a page-fault. */
2623 /** @todo This should probably be done much earlier in IEM/PGM. See
2624 * @bugref{5653#c49}. */
2625 if (fFlags & IEM_XCPT_FLAGS_CR2)
2626 pVCpu->cpum.GstCtx.cr2 = uCr2;
2627
2628 /*
2629 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2630 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2631 */
2632 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2633 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2634 if (uNewTssLimit < uNewTssLimitMin)
2635 {
2636 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2637 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2638 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2639 }
2640
2641 /*
2642 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2643 * The new TSS must have been read and validated (DPL, limits etc.) before a
2644 * task-switch VM-exit commences.
2645 *
2646 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2647 */
2648 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2649 {
2650 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2651 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2652 }
2653
2654 /*
2655 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2656 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2657 */
2658 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2659 {
2660 uint64_t const uExitInfo1 = SelTss;
2661 uint64_t uExitInfo2 = uErr;
2662 switch (enmTaskSwitch)
2663 {
2664 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2665 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2666 default: break;
2667 }
2668 if (fFlags & IEM_XCPT_FLAGS_ERR)
2669 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2670 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2671 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2672
2673 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2674 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2675 RT_NOREF2(uExitInfo1, uExitInfo2);
2676 }
2677
2678 /*
2679 * Check the current TSS limit. The last written byte to the current TSS during the
2680 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2681 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2682 *
2683 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2684 * end up with smaller than "legal" TSS limits.
2685 */
2686 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2687 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2688 if (uCurTssLimit < uCurTssLimitMin)
2689 {
2690 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2691 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2692 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2693 }
2694
2695 /*
2696 * Verify that the new TSS can be accessed and map it. Map only the required contents
2697 * and not the entire TSS.
2698 */
2699 uint8_t bUnmapInfoNewTss;
2700 void *pvNewTss;
2701 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2702 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2703 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2704 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2705 * not perform correct translation if this happens. See Intel spec. 7.2.1
2706 * "Task-State Segment". */
2707 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2708/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2709 * Consider wrapping the remainder into a function for simpler cleanup. */
2710 if (rcStrict != VINF_SUCCESS)
2711 {
2712 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2713 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2714 return rcStrict;
2715 }
2716
2717 /*
2718 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2719 */
2720 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2721 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2722 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2723 {
2724 uint8_t bUnmapInfoDescCurTss;
2725 PX86DESC pDescCurTss;
2726 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2727 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2728 if (rcStrict != VINF_SUCCESS)
2729 {
2730 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2731 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2732 return rcStrict;
2733 }
2734
2735 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2736 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2737 if (rcStrict != VINF_SUCCESS)
2738 {
2739 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2740 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2741 return rcStrict;
2742 }
2743
2744 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2745 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2746 {
2747 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2748 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2749 fEFlags &= ~X86_EFL_NT;
2750 }
2751 }
2752
2753 /*
2754 * Save the CPU state into the current TSS.
2755 */
2756 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2757 if (GCPtrNewTss == GCPtrCurTss)
2758 {
2759 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2760 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2761 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2762 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2763 pVCpu->cpum.GstCtx.ldtr.Sel));
2764 }
2765 if (fIsNewTss386)
2766 {
2767 /*
2768 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2769 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2770 */
2771 uint8_t bUnmapInfoCurTss32;
2772 void *pvCurTss32;
2773 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2774 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2775 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2776 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2777 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2778 if (rcStrict != VINF_SUCCESS)
2779 {
2780 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2781 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2782 return rcStrict;
2783 }
2784
2785 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
2786 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2787 pCurTss32->eip = uNextEip;
2788 pCurTss32->eflags = fEFlags;
2789 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2790 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2791 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2792 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2793 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2794 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2795 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2796 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2797 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2798 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2799 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2800 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2801 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2802 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2803
2804 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2805 if (rcStrict != VINF_SUCCESS)
2806 {
2807 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2808 VBOXSTRICTRC_VAL(rcStrict)));
2809 return rcStrict;
2810 }
2811 }
2812 else
2813 {
2814 /*
2815 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2816 */
2817 uint8_t bUnmapInfoCurTss16;
2818 void *pvCurTss16;
2819 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2820 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2821 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2822 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2823 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2824 if (rcStrict != VINF_SUCCESS)
2825 {
2826 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2827 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2828 return rcStrict;
2829 }
2830
2831 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
2832 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2833 pCurTss16->ip = uNextEip;
2834 pCurTss16->flags = (uint16_t)fEFlags;
2835 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2836 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2837 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2838 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2839 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2840 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2841 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2842 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2843 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2844 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2845 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2846 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2847
2848 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2849 if (rcStrict != VINF_SUCCESS)
2850 {
2851 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2852 VBOXSTRICTRC_VAL(rcStrict)));
2853 return rcStrict;
2854 }
2855 }
2856
2857 /*
2858 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2859 */
2860 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2861 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2862 {
2863 /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2864 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2865 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2866 }
2867
2868 /*
2869 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2870 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2871 */
2872 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2873 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2874 bool fNewDebugTrap;
2875 if (fIsNewTss386)
2876 {
2877 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2878 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2879 uNewEip = pNewTss32->eip;
2880 uNewEflags = pNewTss32->eflags;
2881 uNewEax = pNewTss32->eax;
2882 uNewEcx = pNewTss32->ecx;
2883 uNewEdx = pNewTss32->edx;
2884 uNewEbx = pNewTss32->ebx;
2885 uNewEsp = pNewTss32->esp;
2886 uNewEbp = pNewTss32->ebp;
2887 uNewEsi = pNewTss32->esi;
2888 uNewEdi = pNewTss32->edi;
2889 uNewES = pNewTss32->es;
2890 uNewCS = pNewTss32->cs;
2891 uNewSS = pNewTss32->ss;
2892 uNewDS = pNewTss32->ds;
2893 uNewFS = pNewTss32->fs;
2894 uNewGS = pNewTss32->gs;
2895 uNewLdt = pNewTss32->selLdt;
2896 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2897 }
2898 else
2899 {
2900 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2901 uNewCr3 = 0;
2902 uNewEip = pNewTss16->ip;
2903 uNewEflags = pNewTss16->flags;
2904 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2905 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2906 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2907 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2908 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2909 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2910 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2911 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2912 uNewES = pNewTss16->es;
2913 uNewCS = pNewTss16->cs;
2914 uNewSS = pNewTss16->ss;
2915 uNewDS = pNewTss16->ds;
2916 uNewFS = 0;
2917 uNewGS = 0;
2918 uNewLdt = pNewTss16->selLdt;
2919 fNewDebugTrap = false;
2920 }
2921
2922 if (GCPtrNewTss == GCPtrCurTss)
2923 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2924 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2925
2926 /*
2927 * We're done accessing the new TSS.
2928 */
2929 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2930 if (rcStrict != VINF_SUCCESS)
2931 {
2932 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2933 return rcStrict;
2934 }
2935
2936 /*
2937 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2938 */
2939 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2940 {
2941 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2942 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2943 if (rcStrict != VINF_SUCCESS)
2944 {
2945 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2946 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2947 return rcStrict;
2948 }
2949
2950 /* Check that the descriptor indicates the new TSS is available (not busy). */
2951 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2952 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2953 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2954
2955 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2956 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2957 if (rcStrict != VINF_SUCCESS)
2958 {
2959 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2960 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2961 return rcStrict;
2962 }
2963 }
2964
2965 /*
2966 * From this point on, we're technically in the new task. We will defer exceptions
2967 * until the completion of the task switch but before executing any instructions in the new task.
2968 */
2969 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2970 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2971 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2972 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2973 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2974 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2975 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2976
2977 /* Set the busy bit in TR. */
2978 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2979
2980 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2981 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2982 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2983 {
2984 uNewEflags |= X86_EFL_NT;
2985 }
2986
2987 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2988 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2989 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2990
2991 pVCpu->cpum.GstCtx.eip = uNewEip;
2992 pVCpu->cpum.GstCtx.eax = uNewEax;
2993 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2994 pVCpu->cpum.GstCtx.edx = uNewEdx;
2995 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2996 pVCpu->cpum.GstCtx.esp = uNewEsp;
2997 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2998 pVCpu->cpum.GstCtx.esi = uNewEsi;
2999 pVCpu->cpum.GstCtx.edi = uNewEdi;
3000
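    /* Sanitize the EFLAGS image loaded from the TSS: keep only the defined
       (live) bits and force the always-one reserved bit 1. */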
3001 uNewEflags &= X86_EFL_LIVE_MASK;
3002 uNewEflags |= X86_EFL_RA1_MASK;
3003 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3004
3005 /*
3006 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3007 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3008 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3009 */
3010 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3011 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3012
3013 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3014 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3015
3016 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3017 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3018
3019 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3020 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3021
3022 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3023 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3024
3025 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3026 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3027 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3028
3029 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3030 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3031 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3032 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3033
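    /* On Intel CPUs the selectors loaded above are additionally flagged as unusable until
       they have been validated further down; this presumably mirrors the VT-x 'unusable'
       segment attribute. AMD CPUs have no such bit. */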
3034 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3035 {
3036 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3037 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3038 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3039 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3040 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3041 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3042 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3043 }
3044
3045 /*
3046 * Switch CR3 for the new task.
3047 */
3048 if ( fIsNewTss386
3049 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3050 {
3051 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3052 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3053 AssertRCSuccessReturn(rc, rc);
3054
3055 /* Inform PGM. */
3056 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3057 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3058 AssertRCReturn(rc, rc);
3059 /* ignore informational status codes */
3060
3061 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3062 }
3063
3064 /*
3065 * Switch LDTR for the new task.
3066 */
3067 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3068 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3069 else
3070 {
3071 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3072
3073 IEMSELDESC DescNewLdt;
3074 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3075 if (rcStrict != VINF_SUCCESS)
3076 {
3077 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3078 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3079 return rcStrict;
3080 }
3081 if ( !DescNewLdt.Legacy.Gen.u1Present
3082 || DescNewLdt.Legacy.Gen.u1DescType
3083 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3084 {
3085 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3086 uNewLdt, DescNewLdt.Legacy.u));
3087 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3088 }
3089
3090 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3091 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3092 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3093 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3094 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3095 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3096 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3097 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3098 }
3099
3100 IEMSELDESC DescSS;
3101 if (IEM_IS_V86_MODE(pVCpu))
3102 {
3103 IEM_SET_CPL(pVCpu, 3);
3104 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3105 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3106 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3107 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3108 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3109 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3110
3111 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3112 DescSS.Legacy.u = 0;
3113 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3114 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3115 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3116 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3117 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3118 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3119 DescSS.Legacy.Gen.u2Dpl = 3;
3120 }
3121 else
3122 {
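        /* Outside V86 mode the CPL of the new task is taken from the RPL of the CS
           selector found in the incoming TSS. */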
3123 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3124
3125 /*
3126 * Load the stack segment for the new task.
3127 */
3128 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3129 {
3130 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3131 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3132 }
3133
3134 /* Fetch the descriptor. */
3135 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3136 if (rcStrict != VINF_SUCCESS)
3137 {
3138 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3139 VBOXSTRICTRC_VAL(rcStrict)));
3140 return rcStrict;
3141 }
3142
3143 /* SS must be a data segment and writable. */
3144 if ( !DescSS.Legacy.Gen.u1DescType
3145 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3146 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3147 {
3148 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3149 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3150 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3151 }
3152
3153 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3154 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3155 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3156 {
3157 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3158 uNewCpl));
3159 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3160 }
3161
3162 /* Is it there? */
3163 if (!DescSS.Legacy.Gen.u1Present)
3164 {
3165 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3166 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3167 }
3168
3169 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3170 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3171
3172 /* Set the accessed bit before committing the result into SS. */
3173 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3174 {
3175 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3176 if (rcStrict != VINF_SUCCESS)
3177 return rcStrict;
3178 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3179 }
3180
3181 /* Commit SS. */
3182 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3183 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3184 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3185 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3186 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3187 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3188 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3189
3190 /* CPL has changed, update IEM before loading rest of segments. */
3191 IEM_SET_CPL(pVCpu, uNewCpl);
3192
3193 /*
3194 * Load the data segments for the new task.
3195 */
3196 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3197 if (rcStrict != VINF_SUCCESS)
3198 return rcStrict;
3199 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3200 if (rcStrict != VINF_SUCCESS)
3201 return rcStrict;
3202 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3203 if (rcStrict != VINF_SUCCESS)
3204 return rcStrict;
3205 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3206 if (rcStrict != VINF_SUCCESS)
3207 return rcStrict;
3208
3209 /*
3210 * Load the code segment for the new task.
3211 */
3212 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3213 {
3214 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3215 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3216 }
3217
3218 /* Fetch the descriptor. */
3219 IEMSELDESC DescCS;
3220 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3221 if (rcStrict != VINF_SUCCESS)
3222 {
3223 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3224 return rcStrict;
3225 }
3226
3227 /* CS must be a code segment. */
3228 if ( !DescCS.Legacy.Gen.u1DescType
3229 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3230 {
3231 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3232 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3233 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3234 }
3235
3236 /* For conforming CS, DPL must be less than or equal to the RPL. */
3237 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3238 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3239 {
3240             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3241 DescCS.Legacy.Gen.u2Dpl));
3242 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3243 }
3244
3245 /* For non-conforming CS, DPL must match RPL. */
3246 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3247 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3248 {
3249             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3250 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3251 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3252 }
3253
3254 /* Is it there? */
3255 if (!DescCS.Legacy.Gen.u1Present)
3256 {
3257 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3258 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3259 }
3260
3261 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3262 u64Base = X86DESC_BASE(&DescCS.Legacy);
3263
3264 /* Set the accessed bit before committing the result into CS. */
3265 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3266 {
3267 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3268 if (rcStrict != VINF_SUCCESS)
3269 return rcStrict;
3270 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3271 }
3272
3273 /* Commit CS. */
3274 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3275 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3276 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3277 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3278 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3279 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3281 }
3282
3283 /* Make sure the CPU mode is correct. */
3284 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3285 if (fExecNew != pVCpu->iem.s.fExec)
3286 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3287 pVCpu->iem.s.fExec = fExecNew;
3288
3289 /** @todo Debug trap. */
3290 if (fIsNewTss386 && fNewDebugTrap)
3291 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3292
3293 /*
3294 * Construct the error code masks based on what caused this task switch.
3295 * See Intel Instruction reference for INT.
3296 */
3297 uint16_t uExt;
3298 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3299 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3300 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3301 uExt = 1;
3302 else
3303 uExt = 0;
3304
3305 /*
3306 * Push any error code on to the new stack.
3307 */
3308 if (fFlags & IEM_XCPT_FLAGS_ERR)
3309 {
3310 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3311 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3312 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
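        /* The error code is pushed as a dword on the new task's stack for a 386 TSS
           and as a word for a 286 TSS. */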
3313
3314 /* Check that there is sufficient space on the stack. */
3315 /** @todo Factor out segment limit checking for normal/expand down segments
3316 * into a separate function. */
3317 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3318 {
3319 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3320 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3321 {
3322 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3323 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3324 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3325 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3326 }
3327 }
3328 else
3329 {
3330 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3331 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3332 {
3333 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3334 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3335 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3336 }
3337 }
3338
3339
3340 if (fIsNewTss386)
3341 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3342 else
3343 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3344 if (rcStrict != VINF_SUCCESS)
3345 {
3346 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3347 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3348 return rcStrict;
3349 }
3350 }
3351
3352 /* Check the new EIP against the new CS limit. */
3353 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3354 {
3355         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3356 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3357 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3358 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3359 }
3360
3361 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3362 pVCpu->cpum.GstCtx.ss.Sel));
3363 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3364}
3365
3366
3367/**
3368 * Implements exceptions and interrupts for protected mode.
3369 *
3370 * @returns VBox strict status code.
3371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3372 * @param cbInstr The number of bytes to offset rIP by in the return
3373 * address.
3374 * @param u8Vector The interrupt / exception vector number.
3375 * @param fFlags The flags.
3376 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3377 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3378 */
3379static VBOXSTRICTRC
3380iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3381 uint8_t cbInstr,
3382 uint8_t u8Vector,
3383 uint32_t fFlags,
3384 uint16_t uErr,
3385 uint64_t uCr2) RT_NOEXCEPT
3386{
3387 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3388
3389 /*
3390 * Read the IDT entry.
3391 */
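    /* Each protected-mode IDT entry is 8 bytes, so the IDT limit must cover the last
       byte of the entry for this vector. */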
3392 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3393 {
3394 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3395 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3396 }
3397 X86DESC Idte;
3398 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3399 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3400 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3401 {
3402 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3403 return rcStrict;
3404 }
3405 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3406 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3407 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3408 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3409
3410 /*
3411 * Check the descriptor type, DPL and such.
3412 * ASSUMES this is done in the same order as described for call-gate calls.
3413 */
3414 if (Idte.Gate.u1DescType)
3415 {
3416 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3417 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3418 }
3419 bool fTaskGate = false;
3420 uint8_t f32BitGate = true;
3421 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3422 switch (Idte.Gate.u4Type)
3423 {
3424 case X86_SEL_TYPE_SYS_UNDEFINED:
3425 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3426 case X86_SEL_TYPE_SYS_LDT:
3427 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3428 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3429 case X86_SEL_TYPE_SYS_UNDEFINED2:
3430 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3431 case X86_SEL_TYPE_SYS_UNDEFINED3:
3432 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3433 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3434 case X86_SEL_TYPE_SYS_UNDEFINED4:
3435 {
3436 /** @todo check what actually happens when the type is wrong...
3437 * esp. call gates. */
3438 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3439 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3440 }
3441
3442 case X86_SEL_TYPE_SYS_286_INT_GATE:
3443 f32BitGate = false;
3444 RT_FALL_THRU();
3445 case X86_SEL_TYPE_SYS_386_INT_GATE:
3446 fEflToClear |= X86_EFL_IF;
3447 break;
3448
3449 case X86_SEL_TYPE_SYS_TASK_GATE:
3450 fTaskGate = true;
3451#ifndef IEM_IMPLEMENTS_TASKSWITCH
3452 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3453#endif
3454 break;
3455
3456 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3457 f32BitGate = false;
3458 break;
3459 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3460 break;
3461
3462 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3463 }
3464
3465 /* Check DPL against CPL if applicable. */
3466 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3467 {
3468 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3469 {
3470 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3471 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3472 }
3473 }
3474
3475 /* Is it there? */
3476 if (!Idte.Gate.u1Present)
3477 {
3478 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3479 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3480 }
3481
3482 /* Is it a task-gate? */
3483 if (fTaskGate)
3484 {
3485 /*
3486 * Construct the error code masks based on what caused this task switch.
3487 * See Intel Instruction reference for INT.
3488 */
3489 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3490 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3491 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3492 RTSEL SelTss = Idte.Gate.u16Sel;
3493
3494 /*
3495 * Fetch the TSS descriptor in the GDT.
3496 */
3497 IEMSELDESC DescTSS;
3498 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3499 if (rcStrict != VINF_SUCCESS)
3500 {
3501 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3502 VBOXSTRICTRC_VAL(rcStrict)));
3503 return rcStrict;
3504 }
3505
3506 /* The TSS descriptor must be a system segment and be available (not busy). */
3507 if ( DescTSS.Legacy.Gen.u1DescType
3508 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3509 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3510 {
3511 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3512 u8Vector, SelTss, DescTSS.Legacy.au64));
3513 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3514 }
3515
3516 /* The TSS must be present. */
3517 if (!DescTSS.Legacy.Gen.u1Present)
3518 {
3519 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3520 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3521 }
3522
3523 /* Do the actual task switch. */
3524 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3525 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3526 fFlags, uErr, uCr2, SelTss, &DescTSS);
3527 }
3528
3529 /* A null CS is bad. */
3530 RTSEL NewCS = Idte.Gate.u16Sel;
3531 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3532 {
3533 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3534 return iemRaiseGeneralProtectionFault0(pVCpu);
3535 }
3536
3537 /* Fetch the descriptor for the new CS. */
3538 IEMSELDESC DescCS;
3539 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3540 if (rcStrict != VINF_SUCCESS)
3541 {
3542 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3543 return rcStrict;
3544 }
3545
3546 /* Must be a code segment. */
3547 if (!DescCS.Legacy.Gen.u1DescType)
3548 {
3549 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3550 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3551 }
3552 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3553 {
3554 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3555 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3556 }
3557
3558 /* Don't allow lowering the privilege level. */
3559 /** @todo Does the lowering of privileges apply to software interrupts
3560      * only?  This has a bearing on the more-privileged or
3561 * same-privilege stack behavior further down. A testcase would
3562 * be nice. */
3563 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3564 {
3565 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3566 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3567 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3568 }
3569
3570 /* Make sure the selector is present. */
3571 if (!DescCS.Legacy.Gen.u1Present)
3572 {
3573 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3574 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3575 }
3576
3577#ifdef LOG_ENABLED
3578     /* If this is a software interrupt, try to decode it when this kind of logging is enabled. */
3579 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3580 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3581 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3582#endif
3583
3584 /* Check the new EIP against the new CS limit. */
3585 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3586 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3587 ? Idte.Gate.u16OffsetLow
3588 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3589 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3590 if (uNewEip > cbLimitCS)
3591 {
3592 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3593 u8Vector, uNewEip, cbLimitCS, NewCS));
3594 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3595 }
3596 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3597
3598 /* Calc the flag image to push. */
3599 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3600 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3601 fEfl &= ~X86_EFL_RF;
3602 else
3603 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3604
3605 /* From V8086 mode only go to CPL 0. */
3606 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3607 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3608 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3609 {
3610 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3611 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3612 }
3613
3614 /*
3615 * If the privilege level changes, we need to get a new stack from the TSS.
3616 * This in turns means validating the new SS and ESP...
3617 */
3618 if (uNewCpl != IEM_GET_CPL(pVCpu))
3619 {
3620 RTSEL NewSS;
3621 uint32_t uNewEsp;
3622 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3623 if (rcStrict != VINF_SUCCESS)
3624 return rcStrict;
3625
3626 IEMSELDESC DescSS;
3627 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3628 if (rcStrict != VINF_SUCCESS)
3629 return rcStrict;
3630 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3631 if (!DescSS.Legacy.Gen.u1DefBig)
3632 {
3633 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3634 uNewEsp = (uint16_t)uNewEsp;
3635 }
3636
3637 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3638
3639 /* Check that there is sufficient space for the stack frame. */
3640 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3641 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3642 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3643 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
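        /* That is: EIP, CS, EFLAGS, old ESP and old SS (5 entries, plus one for an error
           code when present), and additionally ES, DS, FS and GS when interrupting V86
           code; entries are words for 16-bit gates and dwords for 32-bit ones, hence the
           shift by f32BitGate. */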
3644
3645 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3646 {
3647 if ( uNewEsp - 1 > cbLimitSS
3648 || uNewEsp < cbStackFrame)
3649 {
3650 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3651 u8Vector, NewSS, uNewEsp, cbStackFrame));
3652 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3653 }
3654 }
3655 else
3656 {
3657 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3658 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3659 {
3660 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3661 u8Vector, NewSS, uNewEsp, cbStackFrame));
3662 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3663 }
3664 }
3665
3666 /*
3667 * Start making changes.
3668 */
3669
3670 /* Set the new CPL so that stack accesses use it. */
3671 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3672 IEM_SET_CPL(pVCpu, uNewCpl);
3673
3674 /* Create the stack frame. */
3675 uint8_t bUnmapInfoStackFrame;
3676 RTPTRUNION uStackFrame;
3677 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3678 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3679 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3680 if (rcStrict != VINF_SUCCESS)
3681 return rcStrict;
3682 if (f32BitGate)
3683 {
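            /* Frame layout at increasing addresses: the error code (if any) occupies the
               lowest dword since it is pushed last architecturally, then return EIP, CS,
               EFLAGS, old ESP and old SS; ES, DS, FS and GS follow when coming from V86 mode. */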
3684 if (fFlags & IEM_XCPT_FLAGS_ERR)
3685 *uStackFrame.pu32++ = uErr;
3686 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3687 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3688 uStackFrame.pu32[2] = fEfl;
3689 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3690 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3691 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3692 if (fEfl & X86_EFL_VM)
3693 {
3694 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3695 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3696 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3697 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3698 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3699 }
3700 }
3701 else
3702 {
3703 if (fFlags & IEM_XCPT_FLAGS_ERR)
3704 *uStackFrame.pu16++ = uErr;
3705 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3706 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3707 uStackFrame.pu16[2] = fEfl;
3708 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3709 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3710 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3711 if (fEfl & X86_EFL_VM)
3712 {
3713 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3714 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3715 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3716 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3717 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3718 }
3719 }
3720 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3721 if (rcStrict != VINF_SUCCESS)
3722 return rcStrict;
3723
3724 /* Mark the selectors 'accessed' (hope this is the correct time). */
3725         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3726 * after pushing the stack frame? (Write protect the gdt + stack to
3727 * find out.) */
3728 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3729 {
3730 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3731 if (rcStrict != VINF_SUCCESS)
3732 return rcStrict;
3733 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3734 }
3735
3736 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3737 {
3738 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3739 if (rcStrict != VINF_SUCCESS)
3740 return rcStrict;
3741 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3742 }
3743
3744 /*
3745         * Start committing the register changes (joins with the DPL=CPL branch).
3746 */
3747 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3748 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3749 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3750 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3751 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3752 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3753 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3754 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3755 * SP is loaded).
3756 * Need to check the other combinations too:
3757 * - 16-bit TSS, 32-bit handler
3758 * - 32-bit TSS, 16-bit handler */
3759 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3760 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3761 else
3762 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3763
3764 if (fEfl & X86_EFL_VM)
3765 {
3766 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3767 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3768 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3769 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3770 }
3771 }
3772 /*
3773 * Same privilege, no stack change and smaller stack frame.
3774 */
3775 else
3776 {
3777 uint64_t uNewRsp;
3778 uint8_t bUnmapInfoStackFrame;
3779 RTPTRUNION uStackFrame;
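        /* Same-privilege frame: return EIP, CS and EFLAGS, plus an optional error code;
           entries are words for 16-bit gates and dwords for 32-bit gates. */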
3780 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3781 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3782 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3783 if (rcStrict != VINF_SUCCESS)
3784 return rcStrict;
3785
3786 if (f32BitGate)
3787 {
3788 if (fFlags & IEM_XCPT_FLAGS_ERR)
3789 *uStackFrame.pu32++ = uErr;
3790 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3791 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3792 uStackFrame.pu32[2] = fEfl;
3793 }
3794 else
3795 {
3796 if (fFlags & IEM_XCPT_FLAGS_ERR)
3797 *uStackFrame.pu16++ = uErr;
3798 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3799 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3800 uStackFrame.pu16[2] = fEfl;
3801 }
3802 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3803 if (rcStrict != VINF_SUCCESS)
3804 return rcStrict;
3805
3806 /* Mark the CS selector as 'accessed'. */
3807 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3808 {
3809 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3810 if (rcStrict != VINF_SUCCESS)
3811 return rcStrict;
3812 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3813 }
3814
3815 /*
3816 * Start committing the register changes (joins with the other branch).
3817 */
3818 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3819 }
3820
3821 /* ... register committing continues. */
3822 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3823 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3824 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3825 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3826 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3827 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3828
3829 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3830 fEfl &= ~fEflToClear;
3831 IEMMISC_SET_EFL(pVCpu, fEfl);
3832
3833 if (fFlags & IEM_XCPT_FLAGS_CR2)
3834 pVCpu->cpum.GstCtx.cr2 = uCr2;
3835
3836 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3837 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3838
3839 /* Make sure the execution flags are correct. */
3840 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3841 if (fExecNew != pVCpu->iem.s.fExec)
3842 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3843 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3844 pVCpu->iem.s.fExec = fExecNew;
3845 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3846
3847 /*
3848     * Deal with debug events that follow the exception and clear inhibit flags.
3849 */
3850 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3851 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3852 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3853 else
3854 {
3855 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3856 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3857 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3858 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3859 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3860 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3861 return iemRaiseDebugException(pVCpu);
3862 }
3863
3864 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3865}
3866
3867
3868/**
3869 * Implements exceptions and interrupts for long mode.
3870 *
3871 * @returns VBox strict status code.
3872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3873 * @param cbInstr The number of bytes to offset rIP by in the return
3874 * address.
3875 * @param u8Vector The interrupt / exception vector number.
3876 * @param fFlags The flags.
3877 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3878 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3879 */
3880static VBOXSTRICTRC
3881iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3882 uint8_t cbInstr,
3883 uint8_t u8Vector,
3884 uint32_t fFlags,
3885 uint16_t uErr,
3886 uint64_t uCr2) RT_NOEXCEPT
3887{
3888 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3889
3890 /*
3891 * Read the IDT entry.
3892 */
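    /* In long mode each IDT entry is 16 bytes: the gate descriptor is extended with the
       upper 32 offset bits and the IST field. */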
3893 uint16_t offIdt = (uint16_t)u8Vector << 4;
3894 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3895 {
3896 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3897 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3898 }
3899 X86DESC64 Idte;
3900#ifdef _MSC_VER /* Shut up silly compiler warning. */
3901 Idte.au64[0] = 0;
3902 Idte.au64[1] = 0;
3903#endif
3904 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3905 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3906 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3907 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3908 {
3909 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3910 return rcStrict;
3911 }
3912 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3913 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3914 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3915
3916 /*
3917 * Check the descriptor type, DPL and such.
3918 * ASSUMES this is done in the same order as described for call-gate calls.
3919 */
3920 if (Idte.Gate.u1DescType)
3921 {
3922 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3923 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3924 }
3925 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3926 switch (Idte.Gate.u4Type)
3927 {
3928 case AMD64_SEL_TYPE_SYS_INT_GATE:
3929 fEflToClear |= X86_EFL_IF;
3930 break;
3931 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3932 break;
3933
3934 default:
3935 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3936 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3937 }
3938
3939 /* Check DPL against CPL if applicable. */
3940 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3941 {
3942 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3943 {
3944 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3945 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3946 }
3947 }
3948
3949 /* Is it there? */
3950 if (!Idte.Gate.u1Present)
3951 {
3952 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3953 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3954 }
3955
3956 /* A null CS is bad. */
3957 RTSEL NewCS = Idte.Gate.u16Sel;
3958 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3959 {
3960 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3961 return iemRaiseGeneralProtectionFault0(pVCpu);
3962 }
3963
3964 /* Fetch the descriptor for the new CS. */
3965 IEMSELDESC DescCS;
3966 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3967 if (rcStrict != VINF_SUCCESS)
3968 {
3969 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3970 return rcStrict;
3971 }
3972
3973 /* Must be a 64-bit code segment. */
3974 if (!DescCS.Long.Gen.u1DescType)
3975 {
3976 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3977 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3978 }
3979 if ( !DescCS.Long.Gen.u1Long
3980 || DescCS.Long.Gen.u1DefBig
3981 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3982 {
3983 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3984 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3985 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3986 }
3987
3988 /* Don't allow lowering the privilege level. For non-conforming CS
3989 selectors, the CS.DPL sets the privilege level the trap/interrupt
3990 handler runs at. For conforming CS selectors, the CPL remains
3991 unchanged, but the CS.DPL must be <= CPL. */
3992 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3993      *        when the CPU is in Ring-0. Result \#GP? */
3994 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3995 {
3996 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3997 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3998 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3999 }
4000
4001
4002 /* Make sure the selector is present. */
4003 if (!DescCS.Legacy.Gen.u1Present)
4004 {
4005 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4006 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4007 }
4008
4009 /* Check that the new RIP is canonical. */
4010 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4011 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4012 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4013 if (!IEM_IS_CANONICAL(uNewRip))
4014 {
4015 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4016 return iemRaiseGeneralProtectionFault0(pVCpu);
4017 }
4018
4019 /*
4020 * If the privilege level changes or if the IST isn't zero, we need to get
4021 * a new stack from the TSS.
4022 */
4023 uint64_t uNewRsp;
4024 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4025 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4026 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4027 || Idte.Gate.u3IST != 0)
4028 {
4029 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4030 if (rcStrict != VINF_SUCCESS)
4031 return rcStrict;
4032 }
4033 else
4034 uNewRsp = pVCpu->cpum.GstCtx.rsp;
4035 uNewRsp &= ~(uint64_t)0xf;
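    /* In 64-bit mode the CPU aligns the new stack pointer down to a 16-byte boundary
       before pushing the interrupt frame. */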
4036
4037 /*
4038 * Calc the flag image to push.
4039 */
4040 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4041 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4042 fEfl &= ~X86_EFL_RF;
4043 else
4044 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4045
4046 /*
4047 * Start making changes.
4048 */
4049 /* Set the new CPL so that stack accesses use it. */
4050 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4051 IEM_SET_CPL(pVCpu, uNewCpl);
4052/** @todo Setting CPL this early seems wrong as it would affect any errors we
4053 * raise accessing the stack and (?) GDT/LDT... */
4054
4055 /* Create the stack frame. */
4056 uint8_t bUnmapInfoStackFrame;
4057 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
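    /* The 64-bit frame always contains SS, RSP, RFLAGS, CS and RIP (5 qwords), plus one
       more qword for the error code when present. */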
4058 RTPTRUNION uStackFrame;
4059 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4060 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4061 if (rcStrict != VINF_SUCCESS)
4062 return rcStrict;
4063
4064 if (fFlags & IEM_XCPT_FLAGS_ERR)
4065 *uStackFrame.pu64++ = uErr;
4066 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4067 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4068 uStackFrame.pu64[2] = fEfl;
4069 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4070 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4071 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4072 if (rcStrict != VINF_SUCCESS)
4073 return rcStrict;
4074
4075     /* Mark the CS selector 'accessed' (hope this is the correct time). */
4076     /** @todo testcase: exactly _when_ are the accessed bits set - before or
4077 * after pushing the stack frame? (Write protect the gdt + stack to
4078 * find out.) */
4079 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4080 {
4081 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4082 if (rcStrict != VINF_SUCCESS)
4083 return rcStrict;
4084 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4085 }
4086
4087 /*
4088     * Start committing the register changes.
4089 */
4090     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4091 * hidden registers when interrupting 32-bit or 16-bit code! */
4092 if (uNewCpl != uOldCpl)
4093 {
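        /* On an inter-privilege transfer in 64-bit mode, SS is loaded with a NULL selector
           whose RPL equals the new CPL; base and limit are irrelevant as the stack is flat. */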
4094 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4095 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4096 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4097 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4098 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4099 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4100 }
4101 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4102 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4103 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4104 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4105 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4106 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4107 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4108 pVCpu->cpum.GstCtx.rip = uNewRip;
4109
4110 fEfl &= ~fEflToClear;
4111 IEMMISC_SET_EFL(pVCpu, fEfl);
4112
4113 if (fFlags & IEM_XCPT_FLAGS_CR2)
4114 pVCpu->cpum.GstCtx.cr2 = uCr2;
4115
4116 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4117 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4118
4119 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4120
4121 /*
4122     * Deal with debug events that follow the exception and clear inhibit flags.
4123 */
4124 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4125 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4126 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4127 else
4128 {
4129 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4130 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4131 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4132 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4133 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4134 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4135 return iemRaiseDebugException(pVCpu);
4136 }
4137
4138 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4139}
4140
4141
4142/**
4143 * Implements exceptions and interrupts.
4144 *
4145 * All exceptions and interrupts go through this function!
4146 *
4147 * @returns VBox strict status code.
4148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4149 * @param cbInstr The number of bytes to offset rIP by in the return
4150 * address.
4151 * @param u8Vector The interrupt / exception vector number.
4152 * @param fFlags The flags.
4153 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4154 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4155 */
4156VBOXSTRICTRC
4157iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4158 uint8_t cbInstr,
4159 uint8_t u8Vector,
4160 uint32_t fFlags,
4161 uint16_t uErr,
4162 uint64_t uCr2) RT_NOEXCEPT
4163{
4164 /*
4165 * Get all the state that we might need here.
4166 */
4167 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4168 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4169
4170#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4171 /*
4172 * Flush prefetch buffer
4173 */
4174 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4175#endif
4176
4177 /*
4178 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4179 */
4180 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4181 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4182 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4183 | IEM_XCPT_FLAGS_BP_INSTR
4184 | IEM_XCPT_FLAGS_ICEBP_INSTR
4185 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4186 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4187 {
4188 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4189 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4190 u8Vector = X86_XCPT_GP;
4191 uErr = 0;
4192 }
4193
4194 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4195#ifdef DBGFTRACE_ENABLED
4196 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4197 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4198 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4199#endif
4200
4201 /*
4202 * Check if DBGF wants to intercept the exception.
4203 */
4204 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4205 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4206 { /* likely */ }
4207 else
4208 {
4209 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4210 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4211 if (rcStrict != VINF_SUCCESS)
4212 return rcStrict;
4213 }
4214
4215 /*
4216 * Evaluate whether NMI blocking should be in effect.
4217 * Normally, NMI blocking is in effect whenever we inject an NMI.
4218 */
4219 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4220 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4221
4222#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4223 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4224 {
4225 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4226 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4227 return rcStrict0;
4228
4229 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4230 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4231 {
4232 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4233 fBlockNmi = false;
4234 }
4235 }
4236#endif
4237
4238#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4239 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4240 {
4241 /*
4242 * If the event is being injected as part of VMRUN, it isn't subject to event
4243 * intercepts in the nested-guest. However, secondary exceptions that occur
4244 * during injection of any event -are- subject to exception intercepts.
4245 *
4246 * See AMD spec. 15.20 "Event Injection".
4247 */
4248 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4249 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4250 else
4251 {
4252 /*
4253 * Check and handle if the event being raised is intercepted.
4254 */
4255 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4256 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4257 return rcStrict0;
4258 }
4259 }
4260#endif
4261
4262 /*
4263 * Set NMI blocking if necessary.
4264 */
4265 if (fBlockNmi)
4266 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4267
4268 /*
4269 * Do recursion accounting.
4270 */
4271 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4272 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4273 if (pVCpu->iem.s.cXcptRecursions == 0)
4274 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4275 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4276 else
4277 {
4278 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4279 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4280 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4281
4282 if (pVCpu->iem.s.cXcptRecursions >= 4)
4283 {
4284#ifdef DEBUG_bird
4285 AssertFailed();
4286#endif
4287 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4288 }
4289
4290 /*
4291 * Evaluate the sequence of recurring events.
4292 */
4293 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4294 NULL /* pXcptRaiseInfo */);
4295 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4296 { /* likely */ }
4297 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4298 {
4299 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4300 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4301 u8Vector = X86_XCPT_DF;
4302 uErr = 0;
4303#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4304 /* VMX nested-guest #DF intercept needs to be checked here. */
4305 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4306 {
4307 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4308 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4309 return rcStrict0;
4310 }
4311#endif
4312 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4313 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4314 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4315 }
4316 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4317 {
4318 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4319 return iemInitiateCpuShutdown(pVCpu);
4320 }
4321 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4322 {
4323 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4324 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4325 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4326 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4327 return VERR_EM_GUEST_CPU_HANG;
4328 }
4329 else
4330 {
4331 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4332 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4333 return VERR_IEM_IPE_9;
4334 }
4335
4336 /*
4337         * The 'EXT' bit is set when an exception occurs during delivery of an external
4338         * event (such as an interrupt or an earlier exception)[1]. The privileged software
4339         * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
4340         * software interrupt, INTO and INT3 instructions, the 'EXT' bit is not set[3].
4341 *
4342 * [1] - Intel spec. 6.13 "Error Code"
4343 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4344 * [3] - Intel Instruction reference for INT n.
4345 */
4346 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4347 && (fFlags & IEM_XCPT_FLAGS_ERR)
4348 && u8Vector != X86_XCPT_PF
4349 && u8Vector != X86_XCPT_DF)
4350 {
4351 uErr |= X86_TRAP_ERR_EXTERNAL;
4352 }
4353 }
4354
4355 pVCpu->iem.s.cXcptRecursions++;
4356 pVCpu->iem.s.uCurXcpt = u8Vector;
4357 pVCpu->iem.s.fCurXcpt = fFlags;
4358 pVCpu->iem.s.uCurXcptErr = uErr;
4359 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4360
4361 /*
4362 * Extensive logging.
4363 */
4364#if defined(LOG_ENABLED) && defined(IN_RING3)
4365 if (LogIs3Enabled())
4366 {
4367 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4368 char szRegs[4096];
4369 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4370 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4371 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4372 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4373 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4374 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4375 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4376 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4377 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4378 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4379 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4380 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4381 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4382 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4383 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4384 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4385 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4386 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4387 " efer=%016VR{efer}\n"
4388 " pat=%016VR{pat}\n"
4389 " sf_mask=%016VR{sf_mask}\n"
4390 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4391 " lstar=%016VR{lstar}\n"
4392 " star=%016VR{star} cstar=%016VR{cstar}\n"
4393 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4394 );
4395
4396 char szInstr[256];
4397 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4398 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4399 szInstr, sizeof(szInstr), NULL);
4400 Log3(("%s%s\n", szRegs, szInstr));
4401 }
4402#endif /* LOG_ENABLED */
4403
4404 /*
4405 * Stats.
4406 */
4407 uint64_t const uTimestamp = ASMReadTSC();
4408 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4409 {
4410 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4411 EMHistoryAddExit(pVCpu,
4412 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4413 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4414 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4415 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4416 }
4417 else
4418 {
4419 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4420 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4421 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4422 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4423 if (fFlags & IEM_XCPT_FLAGS_ERR)
4424 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4425 if (fFlags & IEM_XCPT_FLAGS_CR2)
4426 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4427 }
4428
4429 /*
4430      * Hack alert! Convert incoming debug events to silent ones on Intel.
4431 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4432 */
4433 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4434 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4435 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4436 { /* ignore */ }
4437 else
4438 {
4439 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4440 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4441 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4442 | CPUMCTX_DBG_HIT_DRX_SILENT;
4443 }
4444
4445 /*
4446      * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4447 * to ensure that a stale TLB or paging cache entry will only cause one
4448 * spurious #PF.
4449 */
4450 if ( u8Vector == X86_XCPT_PF
4451 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4452 IEMTlbInvalidatePage(pVCpu, uCr2);
4453
4454 /*
4455 * Call the mode specific worker function.
4456 */
4457 VBOXSTRICTRC rcStrict;
4458 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4459 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4460 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4461 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4462 else
4463 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4464
4465 /* Flush the prefetch buffer. */
4466 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4467
4468 /*
4469 * Unwind.
4470 */
4471 pVCpu->iem.s.cXcptRecursions--;
4472 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4473 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4474 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4475 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4476 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4477 return rcStrict;
4478}
4479
4480#ifdef IEM_WITH_SETJMP
4481/**
4482 * See iemRaiseXcptOrInt. Will not return.
4483 */
4484DECL_NO_RETURN(void)
4485iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4486 uint8_t cbInstr,
4487 uint8_t u8Vector,
4488 uint32_t fFlags,
4489 uint16_t uErr,
4490 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4491{
4492 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4493 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4494}
4495#endif
4496
4497
4498/** \#DE - 00. */
4499VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4500{
4501 if (GCMIsInterceptingXcptDE(pVCpu))
4502 {
4503 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4504 if (rc == VINF_SUCCESS)
4505 {
4506 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4507 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4508 }
4509 }
4510 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4511}
4512
4513
4514#ifdef IEM_WITH_SETJMP
4515/** \#DE - 00. */
4516DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4517{
4518 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4519}
4520#endif
4521
4522
4523/** \#DB - 01.
4524 * @note This automatically clears DR7.GD. */
4525VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4526{
4527 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4528 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4529 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4530}
4531
4532
4533/** \#BR - 05. */
4534VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4535{
4536 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4537}
4538
4539
4540/** \#UD - 06. */
4541VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4542{
4543 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4544}
4545
4546
4547#ifdef IEM_WITH_SETJMP
4548/** \#UD - 06. */
4549DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4550{
4551 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4552}
4553#endif
4554
4555
4556/** \#NM - 07. */
4557VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4558{
4559 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4560}
4561
4562
4563#ifdef IEM_WITH_SETJMP
4564/** \#NM - 07. */
4565DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4566{
4567 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4568}
4569#endif
4570
4571
4572/** \#TS(err) - 0a. */
4573VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4574{
4575 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4576}
4577
4578
4579/** \#TS(tr) - 0a. */
4580VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4581{
4582 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4583 pVCpu->cpum.GstCtx.tr.Sel, 0);
4584}
4585
4586
4587/** \#TS(0) - 0a. */
4588VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4589{
4590 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4591 0, 0);
4592}
4593
4594
4595/** \#TS(err) - 0a. */
4596VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4597{
4598 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4599 uSel & X86_SEL_MASK_OFF_RPL, 0);
4600}
4601
4602
4603/** \#NP(err) - 0b. */
4604VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4605{
4606 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4607}
4608
4609
4610/** \#NP(sel) - 0b. */
4611VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4612{
4613 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4614 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4615 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4616 uSel & ~X86_SEL_RPL, 0);
4617}
4618
4619
4620/** \#SS(seg) - 0c. */
4621VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4622{
4623 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4624 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4625 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4626 uSel & ~X86_SEL_RPL, 0);
4627}
4628
4629
4630/** \#SS(err) - 0c. */
4631VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4632{
4633 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4634 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4635 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4636}
4637
4638
4639/** \#GP(n) - 0d. */
4640VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4641{
4642 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4643 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4644}
4645
4646
4647/** \#GP(0) - 0d. */
4648VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4649{
4650 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4651 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4652}
4653
4654#ifdef IEM_WITH_SETJMP
4655/** \#GP(0) - 0d. */
4656DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4657{
4658 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4659 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4660}
4661#endif
4662
4663
4664/** \#GP(sel) - 0d. */
4665VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4666{
4667 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4668 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4669 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4670 Sel & ~X86_SEL_RPL, 0);
4671}
4672
4673
4674/** \#GP(0) - 0d. */
4675VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4676{
4677 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4678 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4679}
4680
4681
4682/** \#GP(sel) - 0d. */
4683VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4684{
4685 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4686 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4687 NOREF(iSegReg); NOREF(fAccess);
4688 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4689 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4690}
4691
4692#ifdef IEM_WITH_SETJMP
4693/** \#GP(sel) - 0d, longjmp. */
4694DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4695{
4696 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4697 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4698 NOREF(iSegReg); NOREF(fAccess);
4699 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4700 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4701}
4702#endif
4703
4704/** \#GP(sel) - 0d. */
4705VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4706{
4707 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4708 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4709 NOREF(Sel);
4710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4711}
4712
4713#ifdef IEM_WITH_SETJMP
4714/** \#GP(sel) - 0d, longjmp. */
4715DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4716{
4717 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4718 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4719 NOREF(Sel);
4720 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4721}
4722#endif
4723
4724
4725/** \#GP(sel) - 0d. */
4726VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4727{
4728 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4729 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4730 NOREF(iSegReg); NOREF(fAccess);
4731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4732}
4733
4734#ifdef IEM_WITH_SETJMP
4735/** \#GP(sel) - 0d, longjmp. */
4736DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4737{
4738 NOREF(iSegReg); NOREF(fAccess);
4739 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4740}
4741#endif
4742
4743
4744/** \#PF(n) - 0e. */
4745VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4746{
4747 uint16_t uErr;
4748 switch (rc)
4749 {
4750 case VERR_PAGE_NOT_PRESENT:
4751 case VERR_PAGE_TABLE_NOT_PRESENT:
4752 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4753 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4754 uErr = 0;
4755 break;
4756
4757 case VERR_RESERVED_PAGE_TABLE_BITS:
4758 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4759 break;
4760
4761 default:
4762 AssertMsgFailed(("%Rrc\n", rc));
4763 RT_FALL_THRU();
4764 case VERR_ACCESS_DENIED:
4765 uErr = X86_TRAP_PF_P;
4766 break;
4767 }
4768
4769 if (IEM_GET_CPL(pVCpu) == 3)
4770 uErr |= X86_TRAP_PF_US;
4771
4772 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4773 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4774 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4775 uErr |= X86_TRAP_PF_ID;
4776
4777#if 0 /* This is so much nonsense, really. Why was it done like that? */
4778 /* Note! RW access callers reporting a WRITE protection fault will clear
4779 the READ flag before calling. So, read-modify-write accesses (RW)
4780 can safely be reported as READ faults. */
4781 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4782 uErr |= X86_TRAP_PF_RW;
4783#else
4784 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4785 {
4786 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4787 /// (regardless of outcome of the comparison in the latter case).
4788 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4789 uErr |= X86_TRAP_PF_RW;
4790 }
4791#endif
4792
4793     /* For FXSAVE and FRSTOR the #PF is typically reported at the highest address
4794        of the memory operand rather than at the start of it. (It is not clear what
4795        happens if the operand crosses a page boundary.) The current heuristic is to
4796        report the #PF for the last byte if the access is larger than 64 bytes. This
4797        is probably not correct, but we can work that out later; the main objective
4798        now is to get FXSAVE to behave like real hardware and make bs3-cpu-basic2
4799        work. */
4800     if (cbAccess <= 64)
4801     { /* likely */ }
4802 else
4803 GCPtrWhere += cbAccess - 1;
4804
4805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4806 uErr, GCPtrWhere);
4807}
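/*
 * Illustrative sketch, assuming the standard #PF error code layout behind the
 * X86_TRAP_PF_XXX flags used above: a ring-3 write to a present, read-only
 * page would typically produce uErr = X86_TRAP_PF_P | X86_TRAP_PF_RW
 * | X86_TRAP_PF_US, an NX-blocked instruction fetch would add X86_TRAP_PF_ID,
 * and a reserved-bit violation X86_TRAP_PF_RSVD.
 */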
4808
4809#ifdef IEM_WITH_SETJMP
4810/** \#PF(n) - 0e, longjmp. */
4811DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4812 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4813{
4814 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4815}
4816#endif
4817
4818
4819/** \#MF(0) - 10. */
4820VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4821{
4822 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4824
4825 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4826 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4827 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4828}
4829
4830#ifdef IEM_WITH_SETJMP
4831/** \#MF(0) - 10, longjmp. */
4832DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4833{
4834 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4835}
4836#endif
4837
4838
4839/** \#AC(0) - 11. */
4840VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4841{
4842 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4843}
4844
4845#ifdef IEM_WITH_SETJMP
4846/** \#AC(0) - 11, longjmp. */
4847DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4848{
4849 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4850}
4851#endif
4852
4853
4854/** \#XF(0)/\#XM(0) - 19. */
4855VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4856{
4857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4858}
4859
4860
4861#ifdef IEM_WITH_SETJMP
4862 /** \#XF(0)/\#XM(0) - 19, longjmp. */
4863DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4864{
4865 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4866}
4867#endif
4868
4869
4870/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4871IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4872{
4873 NOREF(cbInstr);
4874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4875}
4876
4877
4878/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4879IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4880{
4881 NOREF(cbInstr);
4882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4883}
4884
4885
4886/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4887IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4888{
4889 NOREF(cbInstr);
4890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4891}
4892
4893
4894/** @} */
4895
4896/** @name Common opcode decoders.
4897 * @{
4898 */
4899//#include <iprt/mem.h>
4900
4901/**
4902 * Used to add extra details about a stub case.
4903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4904 */
4905void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4906{
4907#if defined(LOG_ENABLED) && defined(IN_RING3)
4908 PVM pVM = pVCpu->CTX_SUFF(pVM);
4909 char szRegs[4096];
4910 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4911 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4912 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4913 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4914 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4915 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4916 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4917 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4918 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4919 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4920 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4921 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4922 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4923 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4924 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4925 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4926 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4927 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4928 " efer=%016VR{efer}\n"
4929 " pat=%016VR{pat}\n"
4930 " sf_mask=%016VR{sf_mask}\n"
4931 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4932 " lstar=%016VR{lstar}\n"
4933 " star=%016VR{star} cstar=%016VR{cstar}\n"
4934 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4935 );
4936
4937 char szInstr[256];
4938 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4939 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4940 szInstr, sizeof(szInstr), NULL);
4941
4942 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4943#else
4944 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4945#endif
4946}
4947
4948/** @} */
4949
4950
4951
4952/** @name Register Access.
4953 * @{
4954 */
4955
4956/**
4957 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4958 *
4959 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4960 * segment limit.
4961 *
4962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4963 * @param cbInstr Instruction size.
4964 * @param offNextInstr The offset of the next instruction.
4965 * @param enmEffOpSize Effective operand size.
4966 */
4967VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4968 IEMMODE enmEffOpSize) RT_NOEXCEPT
4969{
4970 switch (enmEffOpSize)
4971 {
4972 case IEMMODE_16BIT:
4973 {
4974 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4975 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4976 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4977 pVCpu->cpum.GstCtx.rip = uNewIp;
4978 else
4979 return iemRaiseGeneralProtectionFault0(pVCpu);
4980 break;
4981 }
4982
4983 case IEMMODE_32BIT:
4984 {
4985 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4986 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4987
4988 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4989 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4990 pVCpu->cpum.GstCtx.rip = uNewEip;
4991 else
4992 return iemRaiseGeneralProtectionFault0(pVCpu);
4993 break;
4994 }
4995
4996 case IEMMODE_64BIT:
4997 {
4998 Assert(IEM_IS_64BIT_CODE(pVCpu));
4999
5000 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5001 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5002 pVCpu->cpum.GstCtx.rip = uNewRip;
5003 else
5004 return iemRaiseGeneralProtectionFault0(pVCpu);
5005 break;
5006 }
5007
5008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5009 }
5010
5011#ifndef IEM_WITH_CODE_TLB
5012 /* Flush the prefetch buffer. */
5013 pVCpu->iem.s.cbOpcode = cbInstr;
5014#endif
5015
5016 /*
5017 * Clear RF and finish the instruction (maybe raise #DB).
5018 */
5019 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5020}
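/*
 * Worked example (illustrative only) for the 16-bit case above: the target is
 * computed with uint16_t arithmetic and therefore wraps modulo 64KiB:
 *      ip = 0xfffe, cbInstr = 2, offNextInstr = +3
 *      => uNewIp = (uint16_t)(0xfffe + 2 + 3) = 0x0003
 * The wrapped value is then checked against the CS limit (skipped for 64-bit
 * code) before being committed to rip.
 */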
5021
5022
5023/**
5024 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5025 *
5026 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5027 * segment limit.
5028 *
5029 * @returns Strict VBox status code.
5030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5031 * @param cbInstr Instruction size.
5032 * @param offNextInstr The offset of the next instruction.
5033 */
5034VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5035{
5036 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5037
5038 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5039 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5040 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5041 pVCpu->cpum.GstCtx.rip = uNewIp;
5042 else
5043 return iemRaiseGeneralProtectionFault0(pVCpu);
5044
5045#ifndef IEM_WITH_CODE_TLB
5046 /* Flush the prefetch buffer. */
5047 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5048#endif
5049
5050 /*
5051 * Clear RF and finish the instruction (maybe raise #DB).
5052 */
5053 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5054}
5055
5056
5057/**
5058 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5059 *
5060 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5061 * segment limit.
5062 *
5063 * @returns Strict VBox status code.
5064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5065 * @param cbInstr Instruction size.
5066 * @param offNextInstr The offset of the next instruction.
5067 * @param enmEffOpSize Effective operand size.
5068 */
5069VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5070 IEMMODE enmEffOpSize) RT_NOEXCEPT
5071{
5072 if (enmEffOpSize == IEMMODE_32BIT)
5073 {
5074 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5075
5076 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5077 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5078 pVCpu->cpum.GstCtx.rip = uNewEip;
5079 else
5080 return iemRaiseGeneralProtectionFault0(pVCpu);
5081 }
5082 else
5083 {
5084 Assert(enmEffOpSize == IEMMODE_64BIT);
5085
5086 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5087 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5088 pVCpu->cpum.GstCtx.rip = uNewRip;
5089 else
5090 return iemRaiseGeneralProtectionFault0(pVCpu);
5091 }
5092
5093#ifndef IEM_WITH_CODE_TLB
5094 /* Flush the prefetch buffer. */
5095 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5096#endif
5097
5098 /*
5099 * Clear RF and finish the instruction (maybe raise #DB).
5100 */
5101 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5102}
5103
5104/** @} */
5105
5106
5107/** @name FPU access and helpers.
5108 *
5109 * @{
5110 */
5111
5112/**
5113 * Updates the x87.DS and FPUDP registers.
5114 *
5115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5116 * @param pFpuCtx The FPU context.
5117 * @param iEffSeg The effective segment register.
5118 * @param GCPtrEff The effective address relative to @a iEffSeg.
5119 */
5120DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5121{
5122 RTSEL sel;
5123 switch (iEffSeg)
5124 {
5125 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5126 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5127 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5128 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5129 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5130 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5131 default:
5132 AssertMsgFailed(("%d\n", iEffSeg));
5133 sel = pVCpu->cpum.GstCtx.ds.Sel;
5134 }
5135 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5136 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5137 {
5138 pFpuCtx->DS = 0;
5139 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5140 }
5141 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5142 {
5143 pFpuCtx->DS = sel;
5144 pFpuCtx->FPUDP = GCPtrEff;
5145 }
5146 else
5147 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5148}
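/*
 * Worked example (illustrative only) of the real/V86 mode branch above: with
 * DS = 0x1234 and an effective offset of 0x0010, FPUDP becomes
 * 0x0010 + (0x1234 << 4) = 0x12350, i.e. the classic segment*16 + offset
 * linear address, while the DS field itself is stored as zero.
 */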
5149
5150
5151/**
5152 * Rotates the stack registers in the push direction.
5153 *
5154 * @param pFpuCtx The FPU context.
5155 * @remarks This is a complete waste of time, but fxsave stores the registers in
5156 * stack order.
5157 */
5158DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5159{
5160 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5161 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5162 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5163 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5164 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5165 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5166 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5167 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5168 pFpuCtx->aRegs[0].r80 = r80Tmp;
5169}
5170
5171
5172/**
5173 * Rotates the stack registers in the pop direction.
5174 *
5175 * @param pFpuCtx The FPU context.
5176 * @remarks This is a complete waste of time, but fxsave stores the registers in
5177 * stack order.
5178 */
5179DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5180{
5181 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5182 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5183 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5184 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5185 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5186 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5187 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5188 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5189 pFpuCtx->aRegs[7].r80 = r80Tmp;
5190}
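/*
 * Note on the two rotation helpers above (illustrative, not normative): IEM
 * keeps aRegs[i] in ST-relative order, so aRegs[0] is always ST(0), whereas
 * FTW is indexed by the physical register number.  A push therefore stages
 * the new value in aRegs[7] (the slot that becomes ST(0) once TOP has been
 * decremented) and rotates it down to aRegs[0]; a pop rotates the other way
 * so that the old ST(1) ends up in aRegs[0] again.
 */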
5191
5192
5193/**
5194 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5195 * exception prevents it.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param pResult The FPU operation result to push.
5199 * @param pFpuCtx The FPU context.
5200 */
5201static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5202{
5203 /* Update FSW and bail if there are pending exceptions afterwards. */
5204 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5205 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5206 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5207 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5208 {
5209 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5210 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5211 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5212 pFpuCtx->FSW = fFsw;
5213 return;
5214 }
5215
5216 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5217 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5218 {
5219 /* All is fine, push the actual value. */
5220 pFpuCtx->FTW |= RT_BIT(iNewTop);
5221 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5222 }
5223 else if (pFpuCtx->FCW & X86_FCW_IM)
5224 {
5225 /* Masked stack overflow, push QNaN. */
5226 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5227 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5228 }
5229 else
5230 {
5231 /* Raise stack overflow, don't push anything. */
5232 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5233 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5234 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5236 return;
5237 }
5238
5239 fFsw &= ~X86_FSW_TOP_MASK;
5240 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5241 pFpuCtx->FSW = fFsw;
5242
5243 iemFpuRotateStackPush(pFpuCtx);
5244 RT_NOREF(pVCpu);
5245}
5246
5247
5248/**
5249 * Stores a result in a FPU register and updates the FSW and FTW.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param pFpuCtx The FPU context.
5253 * @param pResult The result to store.
5254 * @param iStReg Which FPU register to store it in.
5255 */
5256static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5257{
5258 Assert(iStReg < 8);
5259 uint16_t fNewFsw = pFpuCtx->FSW;
5260 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5261 fNewFsw &= ~X86_FSW_C_MASK;
5262 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5263 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5264 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5265 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5266 pFpuCtx->FSW = fNewFsw;
5267 pFpuCtx->FTW |= RT_BIT(iReg);
5268 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5269 RT_NOREF(pVCpu);
5270}
5271
5272
5273/**
5274 * Only updates the FPU status word (FSW) with the result of the current
5275 * instruction.
5276 *
5277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5278 * @param pFpuCtx The FPU context.
5279 * @param u16FSW The FSW output of the current instruction.
5280 */
5281static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5282{
5283 uint16_t fNewFsw = pFpuCtx->FSW;
5284 fNewFsw &= ~X86_FSW_C_MASK;
5285 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5286 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5287 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5288 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5289 pFpuCtx->FSW = fNewFsw;
5290 RT_NOREF(pVCpu);
5291}
5292
5293
5294/**
5295 * Pops one item off the FPU stack if no pending exception prevents it.
5296 *
5297 * @param pFpuCtx The FPU context.
5298 */
5299static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5300{
5301 /* Check pending exceptions. */
5302 uint16_t uFSW = pFpuCtx->FSW;
5303 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5304 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5305 return;
5306
5307 /* TOP--. */
5308 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5309 uFSW &= ~X86_FSW_TOP_MASK;
5310 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5311 pFpuCtx->FSW = uFSW;
5312
5313 /* Mark the previous ST0 as empty. */
5314 iOldTop >>= X86_FSW_TOP_SHIFT;
5315 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5316
5317 /* Rotate the registers. */
5318 iemFpuRotateStackPop(pFpuCtx);
5319}
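/*
 * The TOP updates in the push/pop helpers above are plain modulo-8
 * arithmetic.  The following hypothetical helpers (illustration only, not
 * built and not part of the real API) spell that out:
 */
#if 0
static uint16_t iemFpuExampleTopAfterPush(uint16_t uTop)
{
    return (uTop + 7) & X86_FSW_TOP_SMASK; /* TOP - 1 (mod 8); e.g. 0 -> 7 */
}

static uint16_t iemFpuExampleTopAfterPop(uint16_t uTop)
{
    return (uTop + 9) & X86_FSW_TOP_SMASK; /* TOP + 1 (mod 8), since 9 == 8 + 1; e.g. 7 -> 0 */
}
#endif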
5320
5321
5322/**
5323 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5324 *
5325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5326 * @param pResult The FPU operation result to push.
5327 * @param uFpuOpcode The FPU opcode value.
5328 */
5329void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5330{
5331 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5332 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5333 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5334}
5335
5336
5337/**
5338 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5339 * and sets FPUDP and FPUDS.
5340 *
5341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5342 * @param pResult The FPU operation result to push.
5343 * @param iEffSeg The effective segment register.
5344 * @param GCPtrEff The effective address relative to @a iEffSeg.
5345 * @param uFpuOpcode The FPU opcode value.
5346 */
5347void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5348 uint16_t uFpuOpcode) RT_NOEXCEPT
5349{
5350 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5351 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5352 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5353 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5354}
5355
5356
5357/**
5358 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5359 * unless a pending exception prevents it.
5360 *
5361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5362 * @param pResult The FPU operation result to store and push.
5363 * @param uFpuOpcode The FPU opcode value.
5364 */
5365void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5366{
5367 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5368 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5369
5370 /* Update FSW and bail if there are pending exceptions afterwards. */
5371 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5372 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5373 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5374 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5375 {
5376 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5377 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5378 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5379 pFpuCtx->FSW = fFsw;
5380 return;
5381 }
5382
5383 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5384 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5385 {
5386 /* All is fine, push the actual value. */
5387 pFpuCtx->FTW |= RT_BIT(iNewTop);
5388 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5389 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5390 }
5391 else if (pFpuCtx->FCW & X86_FCW_IM)
5392 {
5393 /* Masked stack overflow, push QNaN. */
5394 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5395 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5396 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5397 }
5398 else
5399 {
5400 /* Raise stack overflow, don't push anything. */
5401 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5402 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5403 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5404 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5405 return;
5406 }
5407
5408 fFsw &= ~X86_FSW_TOP_MASK;
5409 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5410 pFpuCtx->FSW = fFsw;
5411
5412 iemFpuRotateStackPush(pFpuCtx);
5413}
5414
5415
5416/**
5417 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5418 * FOP.
5419 *
5420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5421 * @param pResult The result to store.
5422 * @param iStReg Which FPU register to store it in.
5423 * @param uFpuOpcode The FPU opcode value.
5424 */
5425void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5426{
5427 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5428 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5429 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5430}
5431
5432
5433/**
5434 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5435 * FOP, and then pops the stack.
5436 *
5437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5438 * @param pResult The result to store.
5439 * @param iStReg Which FPU register to store it in.
5440 * @param uFpuOpcode The FPU opcode value.
5441 */
5442void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5443{
5444 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5445 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5446 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5447 iemFpuMaybePopOne(pFpuCtx);
5448}
5449
5450
5451/**
5452 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5453 * FPUDP, and FPUDS.
5454 *
5455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5456 * @param pResult The result to store.
5457 * @param iStReg Which FPU register to store it in.
5458 * @param iEffSeg The effective memory operand selector register.
5459 * @param GCPtrEff The effective memory operand offset.
5460 * @param uFpuOpcode The FPU opcode value.
5461 */
5462void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5463 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5464{
5465 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5466 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5467 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5468 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5469}
5470
5471
5472/**
5473 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5474 * FPUDP, and FPUDS, and then pops the stack.
5475 *
5476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5477 * @param pResult The result to store.
5478 * @param iStReg Which FPU register to store it in.
5479 * @param iEffSeg The effective memory operand selector register.
5480 * @param GCPtrEff The effective memory operand offset.
5481 * @param uFpuOpcode The FPU opcode value.
5482 */
5483void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5484 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5485{
5486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5487 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5488 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5489 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5490 iemFpuMaybePopOne(pFpuCtx);
5491}
5492
5493
5494/**
5495 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5496 *
5497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5498 * @param uFpuOpcode The FPU opcode value.
5499 */
5500void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5501{
5502 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5503 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5504}
5505
5506
5507/**
5508 * Updates the FSW, FOP, FPUIP, and FPUCS.
5509 *
5510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5511 * @param u16FSW The FSW from the current instruction.
5512 * @param uFpuOpcode The FPU opcode value.
5513 */
5514void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5515{
5516 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5517 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5518 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5519}
5520
5521
5522/**
5523 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5524 *
5525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5526 * @param u16FSW The FSW from the current instruction.
5527 * @param uFpuOpcode The FPU opcode value.
5528 */
5529void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5530{
5531 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5532 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5533 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5534 iemFpuMaybePopOne(pFpuCtx);
5535}
5536
5537
5538/**
5539 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5540 *
5541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5542 * @param u16FSW The FSW from the current instruction.
5543 * @param iEffSeg The effective memory operand selector register.
5544 * @param GCPtrEff The effective memory operand offset.
5545 * @param uFpuOpcode The FPU opcode value.
5546 */
5547void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5548{
5549 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5550 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5551 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5552 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5553}
5554
5555
5556/**
5557 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5558 *
5559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5560 * @param u16FSW The FSW from the current instruction.
5561 * @param uFpuOpcode The FPU opcode value.
5562 */
5563void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5564{
5565 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5566 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5567 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5568 iemFpuMaybePopOne(pFpuCtx);
5569 iemFpuMaybePopOne(pFpuCtx);
5570}
5571
5572
5573/**
5574 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5575 *
5576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5577 * @param u16FSW The FSW from the current instruction.
5578 * @param iEffSeg The effective memory operand selector register.
5579 * @param GCPtrEff The effective memory operand offset.
5580 * @param uFpuOpcode The FPU opcode value.
5581 */
5582void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5583 uint16_t uFpuOpcode) RT_NOEXCEPT
5584{
5585 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5586 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5587 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5588 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5589 iemFpuMaybePopOne(pFpuCtx);
5590}
5591
5592
5593/**
5594 * Worker routine for raising an FPU stack underflow exception.
5595 *
5596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5597 * @param pFpuCtx The FPU context.
5598 * @param iStReg The stack register being accessed.
5599 */
5600static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5601{
5602 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5603 if (pFpuCtx->FCW & X86_FCW_IM)
5604 {
5605 /* Masked underflow. */
5606 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5607 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5608 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5609 if (iStReg != UINT8_MAX)
5610 {
5611 pFpuCtx->FTW |= RT_BIT(iReg);
5612 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5613 }
5614 }
5615 else
5616 {
5617 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5618 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5619 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5620 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5621 }
5622 RT_NOREF(pVCpu);
5623}
5624
5625
5626/**
5627 * Raises a FPU stack underflow exception.
5628 *
5629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5630 * @param iStReg The destination register that should be loaded
5631 * with QNaN if \#IS is masked. Specify
5632 * UINT8_MAX if none (like for fcom).
5633 * @param uFpuOpcode The FPU opcode value.
5634 */
5635void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5636{
5637 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5638 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5639 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5640}
5641
5642
5643void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5644{
5645 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5646 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5647 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5648 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5649}
5650
5651
5652void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5653{
5654 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5655 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5656 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5657 iemFpuMaybePopOne(pFpuCtx);
5658}
5659
5660
5661void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5662 uint16_t uFpuOpcode) RT_NOEXCEPT
5663{
5664 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5665 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5666 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5667 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5668 iemFpuMaybePopOne(pFpuCtx);
5669}
5670
5671
5672void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5673{
5674 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5675 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5676 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5677 iemFpuMaybePopOne(pFpuCtx);
5678 iemFpuMaybePopOne(pFpuCtx);
5679}
5680
5681
5682void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5683{
5684 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5685 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5686
5687 if (pFpuCtx->FCW & X86_FCW_IM)
5688 {
5689 /* Masked underflow - Push QNaN. */
5690 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5691 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5692 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5693 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5694 pFpuCtx->FTW |= RT_BIT(iNewTop);
5695 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5696 iemFpuRotateStackPush(pFpuCtx);
5697 }
5698 else
5699 {
5700 /* Exception pending - don't change TOP or the register stack. */
5701 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5702 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5703 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5704 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5705 }
5706}
5707
5708
5709void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5710{
5711 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5712 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5713
5714 if (pFpuCtx->FCW & X86_FCW_IM)
5715 {
5716 /* Masked underflow - Push QNaN. */
5717 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5718 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5719 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5720 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5721 pFpuCtx->FTW |= RT_BIT(iNewTop);
5722 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5723 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5724 iemFpuRotateStackPush(pFpuCtx);
5725 }
5726 else
5727 {
5728 /* Exception pending - don't change TOP or the register stack. */
5729 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5730 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5731 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5732 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5733 }
5734}
5735
5736
5737/**
5738 * Worker routine for raising an FPU stack overflow exception on a push.
5739 *
5740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5741 * @param pFpuCtx The FPU context.
5742 */
5743static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5744{
5745 if (pFpuCtx->FCW & X86_FCW_IM)
5746 {
5747 /* Masked overflow. */
5748 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5749 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5750 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5751 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5752 pFpuCtx->FTW |= RT_BIT(iNewTop);
5753 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5754 iemFpuRotateStackPush(pFpuCtx);
5755 }
5756 else
5757 {
5758 /* Exception pending - don't change TOP or the register stack. */
5759 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5760 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5761 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5762 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5763 }
5764 RT_NOREF(pVCpu);
5765}
5766
5767
5768/**
5769 * Raises a FPU stack overflow exception on a push.
5770 *
5771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5772 * @param uFpuOpcode The FPU opcode value.
5773 */
5774void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5775{
5776 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5777 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5778 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5779}
5780
5781
5782/**
5783 * Raises a FPU stack overflow exception on a push with a memory operand.
5784 *
5785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5786 * @param iEffSeg The effective memory operand selector register.
5787 * @param GCPtrEff The effective memory operand offset.
5788 * @param uFpuOpcode The FPU opcode value.
5789 */
5790void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5791{
5792 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5793 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5794 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5795 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5796}
5797
5798/** @} */
5799
5800
5801/** @name Memory access.
5802 *
5803 * @{
5804 */
5805
5806#undef LOG_GROUP
5807#define LOG_GROUP LOG_GROUP_IEM_MEM
5808
5809/**
5810 * Updates the IEMCPU::cbWritten counter if applicable.
5811 *
5812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5813 * @param fAccess The access being accounted for.
5814 * @param cbMem The access size.
5815 */
5816DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5817{
5818 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5819 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5820 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5821}
5822
5823
5824/**
5825 * Applies the segment limit, base and attributes.
5826 *
5827 * This may raise a \#GP or \#SS.
5828 *
5829 * @returns VBox strict status code.
5830 *
5831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5832 * @param fAccess The kind of access which is being performed.
5833 * @param iSegReg The index of the segment register to apply.
5834 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5835 * TSS, ++).
5836 * @param cbMem The access size.
5837 * @param pGCPtrMem Pointer to the guest memory address to apply
5838 * segmentation to. Input and output parameter.
5839 */
5840VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5841{
5842 if (iSegReg == UINT8_MAX)
5843 return VINF_SUCCESS;
5844
5845 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5846 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5847 switch (IEM_GET_CPU_MODE(pVCpu))
5848 {
5849 case IEMMODE_16BIT:
5850 case IEMMODE_32BIT:
5851 {
5852 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5853 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5854
5855 if ( pSel->Attr.n.u1Present
5856 && !pSel->Attr.n.u1Unusable)
5857 {
5858 Assert(pSel->Attr.n.u1DescType);
5859 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5860 {
5861 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5862 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5863 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5864
5865 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5866 {
5867 /** @todo CPL check. */
5868 }
5869
5870 /*
5871 * There are two kinds of data selectors, normal and expand down.
5872 */
5873 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5874 {
5875 if ( GCPtrFirst32 > pSel->u32Limit
5876 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5877 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5878 }
5879 else
5880 {
5881 /*
5882 * The upper boundary is defined by the B bit, not the G bit!
5883 */
5884 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5885 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5886 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5887 }
5888 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5889 }
5890 else
5891 {
5892 /*
5893                  * A code selector can usually be used to read through; writing is
5894                  * only permitted in real and V8086 mode.
5895 */
5896 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5897 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5898 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5899 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5900 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5901
5902 if ( GCPtrFirst32 > pSel->u32Limit
5903 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5904 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5905
5906 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5907 {
5908 /** @todo CPL check. */
5909 }
5910
5911 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5912 }
5913 }
5914 else
5915 return iemRaiseGeneralProtectionFault0(pVCpu);
5916 return VINF_SUCCESS;
5917 }
5918
5919 case IEMMODE_64BIT:
5920 {
5921 RTGCPTR GCPtrMem = *pGCPtrMem;
5922 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5923 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5924
5925 Assert(cbMem >= 1);
5926 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5927 return VINF_SUCCESS;
5928 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5929 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5930 return iemRaiseGeneralProtectionFault0(pVCpu);
5931 }
5932
5933 default:
5934 AssertFailedReturn(VERR_IEM_IPE_7);
5935 }
5936}
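/*
 * Worked example for the expand-down case above (illustrative only): with a
 * limit of 0x0fff and B=0 the valid offsets are 0x1000 through 0xffff, so an
 * access at 0x0800 takes the iemRaiseSelectorBounds() path while an access at
 * 0x2000 is fine; with B=1 the upper bound becomes 0xffffffff instead.
 */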
5937
5938
5939/**
5940 * Translates a virtual address to a physical address and checks if we
5941 * can access the page as specified.
5942 *
5943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5944 * @param GCPtrMem The virtual address.
5945 * @param cbAccess The access size, for raising \#PF correctly for
5946 * FXSAVE and such.
5947 * @param fAccess The intended access.
5948 * @param pGCPhysMem Where to return the physical address.
5949 */
5950VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5951 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5952{
5953 /** @todo Need a different PGM interface here. We're currently using
5954 * generic / REM interfaces. this won't cut it for R0. */
5955 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5956 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5957 * here. */
5958 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5959 PGMPTWALKFAST WalkFast;
5960 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5961 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5962 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5963 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5964 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5965 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
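    /* Note: XORing with X86_CR0_WP (== PGMQPAGE_F_CR0_WP0, see the AssertCompile above)
       sets the WP0 flag in fQPage exactly when CR0.WP is clear, i.e. when supervisor
       writes to read-only pages are architecturally permitted. */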
5966 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5967 fQPage |= PGMQPAGE_F_USER_MODE;
5968 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5969 if (RT_SUCCESS(rc))
5970 {
5971 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5972
5973 /* If the page is writable and does not have the no-exec bit set, all
5974 access is allowed. Otherwise we'll have to check more carefully... */
5975 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5976 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5977 || (WalkFast.fEffective & X86_PTE_RW)
5978 || ( ( IEM_GET_CPL(pVCpu) != 3
5979 || (fAccess & IEM_ACCESS_WHAT_SYS))
5980 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
5981 && ( (WalkFast.fEffective & X86_PTE_US)
5982 || IEM_GET_CPL(pVCpu) != 3
5983 || (fAccess & IEM_ACCESS_WHAT_SYS) )
5984 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
5985 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
5986 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5987 )
5988 );
5989
5990 /* PGMGstQueryPageFast sets the A & D bits. */
5991 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5992 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
5993
5994 *pGCPhysMem = WalkFast.GCPhys;
5995 return VINF_SUCCESS;
5996 }
5997
5998 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5999 /** @todo Check unassigned memory in unpaged mode. */
6000#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6001 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6002 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6003#endif
6004 *pGCPhysMem = NIL_RTGCPHYS;
6005 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6006}
6007
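/*
 * Illustrative sketch: translating a linear address for a 4 byte data write before
 * touching the physical page, as done by iemMemBounceBufferMapCrossPage and the
 * non-TLB path of iemMemMap below:
 *
 *     RTGCPHYS     GCPhysMem;
 *     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 4,
 *                                                               IEM_ACCESS_DATA_W, &GCPhysMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;   // #PF (or a nested-paging exit) has already been raised
 */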
6008#if 0 /*unused*/
6009/**
6010 * Looks up a memory mapping entry.
6011 *
6012 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6014 * @param pvMem The memory address.
6015 * @param pvMem The memory address.
6015 * @param fAccess The access flags of the mapping to look up.
6016 */
6017DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6018{
6019 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6020 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6021 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6022 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6023 return 0;
6024 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6025 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6026 return 1;
6027 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6028 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6029 return 2;
6030 return VERR_NOT_FOUND;
6031}
6032#endif
6033
6034/**
6035 * Finds a free memmap entry when using iNextMapping doesn't work.
6036 *
6037 * @returns Memory mapping index, 1024 on failure.
6038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6039 */
6040static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6041{
6042 /*
6043 * The easy case.
6044 */
6045 if (pVCpu->iem.s.cActiveMappings == 0)
6046 {
6047 pVCpu->iem.s.iNextMapping = 1;
6048 return 0;
6049 }
6050
6051 /* There should be enough mappings for all instructions. */
6052 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6053
6054 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6055 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6056 return i;
6057
6058 AssertFailedReturn(1024);
6059}
6060
6061
6062/**
6063 * Commits a bounce buffer that needs writing back and unmaps it.
6064 *
6065 * @returns Strict VBox status code.
6066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6067 * @param iMemMap The index of the buffer to commit.
6068 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6069 * Always false in ring-3, obviously.
6070 */
6071static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6072{
6073 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6074 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6075#ifdef IN_RING3
6076 Assert(!fPostponeFail);
6077 RT_NOREF_PV(fPostponeFail);
6078#endif
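    /* When fPostponeFail is true (ring-0 only), a failed physical write is not fatal
       here: the mapping is flagged with IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND and
       VMCPU_FF_IEM is set so ring-3 can deal with the write later. */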
6079
6080 /*
6081 * Do the writing.
6082 */
6083 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6084 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6085 {
6086 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6087 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6088 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6089 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6090 {
6091 /*
6092 * Carefully and efficiently dealing with access handler return
6093             * codes makes this a little bloated.
6094 */
6095 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6096 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6097 pbBuf,
6098 cbFirst,
6099 PGMACCESSORIGIN_IEM);
6100 if (rcStrict == VINF_SUCCESS)
6101 {
6102 if (cbSecond)
6103 {
6104 rcStrict = PGMPhysWrite(pVM,
6105 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6106 pbBuf + cbFirst,
6107 cbSecond,
6108 PGMACCESSORIGIN_IEM);
6109 if (rcStrict == VINF_SUCCESS)
6110 { /* nothing */ }
6111 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6112 {
6113 LogEx(LOG_GROUP_IEM,
6114 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6115 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6116 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6118 }
6119#ifndef IN_RING3
6120 else if (fPostponeFail)
6121 {
6122 LogEx(LOG_GROUP_IEM,
6123 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6124 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6125 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6126 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6127 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6128 return iemSetPassUpStatus(pVCpu, rcStrict);
6129 }
6130#endif
6131 else
6132 {
6133 LogEx(LOG_GROUP_IEM,
6134 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6136 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6137 return rcStrict;
6138 }
6139 }
6140 }
6141 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6142 {
6143 if (!cbSecond)
6144 {
6145 LogEx(LOG_GROUP_IEM,
6146 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6147 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6148 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6149 }
6150 else
6151 {
6152 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6153 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6154 pbBuf + cbFirst,
6155 cbSecond,
6156 PGMACCESSORIGIN_IEM);
6157 if (rcStrict2 == VINF_SUCCESS)
6158 {
6159 LogEx(LOG_GROUP_IEM,
6160 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6163 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6164 }
6165 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6166 {
6167 LogEx(LOG_GROUP_IEM,
6168 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6169 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6171 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6172 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6173 }
6174#ifndef IN_RING3
6175 else if (fPostponeFail)
6176 {
6177 LogEx(LOG_GROUP_IEM,
6178 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6179 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6180 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6181 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6182 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6183 return iemSetPassUpStatus(pVCpu, rcStrict);
6184 }
6185#endif
6186 else
6187 {
6188 LogEx(LOG_GROUP_IEM,
6189 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6190 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6191 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6192 return rcStrict2;
6193 }
6194 }
6195 }
6196#ifndef IN_RING3
6197 else if (fPostponeFail)
6198 {
6199 LogEx(LOG_GROUP_IEM,
6200 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6201 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6202 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6203 if (!cbSecond)
6204 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6205 else
6206 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6207 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6208 return iemSetPassUpStatus(pVCpu, rcStrict);
6209 }
6210#endif
6211 else
6212 {
6213 LogEx(LOG_GROUP_IEM,
6214 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6216 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6217 return rcStrict;
6218 }
6219 }
6220 else
6221 {
6222 /*
6223 * No access handlers, much simpler.
6224 */
6225 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6226 if (RT_SUCCESS(rc))
6227 {
6228 if (cbSecond)
6229 {
6230 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6231 if (RT_SUCCESS(rc))
6232 { /* likely */ }
6233 else
6234 {
6235 LogEx(LOG_GROUP_IEM,
6236 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6237 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6238 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6239 return rc;
6240 }
6241 }
6242 }
6243 else
6244 {
6245 LogEx(LOG_GROUP_IEM,
6246 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6247 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6248 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6249 return rc;
6250 }
6251 }
6252 }
6253
6254#if defined(IEM_LOG_MEMORY_WRITES)
6255 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6256 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6257 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6258 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6259 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6260 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6261
6262 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6263 g_cbIemWrote = cbWrote;
6264 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6265#endif
6266
6267 /*
6268 * Free the mapping entry.
6269 */
6270 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6271 Assert(pVCpu->iem.s.cActiveMappings != 0);
6272 pVCpu->iem.s.cActiveMappings--;
6273 return VINF_SUCCESS;
6274}
6275
6276
6277/**
6278 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6279 */
6280DECL_FORCE_INLINE(uint32_t)
6281iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6282{
6283 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6284 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6285 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6286 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6287}
6288
6289
6290/**
6291 * iemMemMap worker that deals with a request crossing pages.
6292 */
6293static VBOXSTRICTRC
6294iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6295 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6296{
6297 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6298 Assert(cbMem <= GUEST_PAGE_SIZE);
6299
6300 /*
6301 * Do the address translations.
6302 */
6303 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
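    /* Worked example (assuming 4 KiB guest pages): an 8 byte access at
       GCPtrFirst = 0x10ffd gives cbFirstPage = 0x1000 - 0xffd = 3, leaving
       cbSecondPage = 8 - 3 = 5 bytes on the following page. */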
6304 RTGCPHYS GCPhysFirst;
6305 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6306 if (rcStrict != VINF_SUCCESS)
6307 return rcStrict;
6308 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6309
6310 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6311 RTGCPHYS GCPhysSecond;
6312 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6313 cbSecondPage, fAccess, &GCPhysSecond);
6314 if (rcStrict != VINF_SUCCESS)
6315 return rcStrict;
6316 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6317 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6318
6319 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6320
6321 /*
6322 * Check for data breakpoints.
6323 */
6324 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6325 { /* likely */ }
6326 else
6327 {
6328 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6329 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6330 cbSecondPage, fAccess);
6331 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6332 if (fDataBps > 1)
6333 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6334 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6335 }
6336
6337 /*
6338 * Read in the current memory content if it's a read, execute or partial
6339 * write access.
6340 */
6341 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6342
6343 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6344 {
6345 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6346 {
6347 /*
6348             * Must carefully deal with access handler status codes here; this
6349             * makes the code a bit bloated.
6350 */
6351 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6352 if (rcStrict == VINF_SUCCESS)
6353 {
6354 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6355 if (rcStrict == VINF_SUCCESS)
6356 { /*likely */ }
6357 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6358 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6359 else
6360 {
6361 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6362 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6363 return rcStrict;
6364 }
6365 }
6366 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6367 {
6368 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6369 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6370 {
6371 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6372 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6373 }
6374 else
6375 {
6376 LogEx(LOG_GROUP_IEM,
6377 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6378                                       GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6379 return rcStrict2;
6380 }
6381 }
6382 else
6383 {
6384 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6385 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6386 return rcStrict;
6387 }
6388 }
6389 else
6390 {
6391 /*
6392             * No informational status codes here, so this is much more straightforward.
6393 */
6394 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6395 if (RT_SUCCESS(rc))
6396 {
6397 Assert(rc == VINF_SUCCESS);
6398 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6399 if (RT_SUCCESS(rc))
6400 Assert(rc == VINF_SUCCESS);
6401 else
6402 {
6403 LogEx(LOG_GROUP_IEM,
6404 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6405 return rc;
6406 }
6407 }
6408 else
6409 {
6410 LogEx(LOG_GROUP_IEM,
6411 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6412 return rc;
6413 }
6414 }
6415 }
6416#ifdef VBOX_STRICT
6417 else
6418 memset(pbBuf, 0xcc, cbMem);
6419 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6420 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6421#endif
6422 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6423
6424 /*
6425 * Commit the bounce buffer entry.
6426 */
6427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6429 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6430 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6431 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6432 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6433 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6434 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6435 pVCpu->iem.s.cActiveMappings++;
6436
6437 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6438 *ppvMem = pbBuf;
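    /* The unmap cookie layout (as checked by iemMemCommitAndUnmap & friends):
       bits 2:0 = mapping index, bit 3 = validity marker, bits 7:4 = the
       IEM_ACCESS_TYPE_XXX bits of fAccess. */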
6439 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6440 return VINF_SUCCESS;
6441}
6442
6443
6444/**
6445 * iemMemMap worker that deals with iemMemPageMap failures.
6446 */
6447static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6448 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6449{
6450 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6451
6452 /*
6453 * Filter out conditions we can handle and the ones which shouldn't happen.
6454 */
6455 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6456 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6457 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6458 {
6459 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6460 return rcMap;
6461 }
6462 pVCpu->iem.s.cPotentialExits++;
6463
6464 /*
6465 * Read in the current memory content if it's a read, execute or partial
6466 * write access.
6467 */
6468 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6469 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6470 {
6471 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6472 memset(pbBuf, 0xff, cbMem);
6473 else
6474 {
6475 int rc;
6476 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6477 {
6478 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6479 if (rcStrict == VINF_SUCCESS)
6480 { /* nothing */ }
6481 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6482 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6483 else
6484 {
6485 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6486 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6487 return rcStrict;
6488 }
6489 }
6490 else
6491 {
6492 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6493 if (RT_SUCCESS(rc))
6494 { /* likely */ }
6495 else
6496 {
6497 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6498 GCPhysFirst, rc));
6499 return rc;
6500 }
6501 }
6502 }
6503 }
6504#ifdef VBOX_STRICT
6505 else
6506 memset(pbBuf, 0xcc, cbMem);
6507#endif
6508#ifdef VBOX_STRICT
6509 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6510 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6511#endif
6512
6513 /*
6514 * Commit the bounce buffer entry.
6515 */
6516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6518 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6519 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6520 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6521 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6522 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6523 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6524 pVCpu->iem.s.cActiveMappings++;
6525
6526 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6527 *ppvMem = pbBuf;
6528 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6529 return VINF_SUCCESS;
6530}
6531
6532
6533
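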
6534/**
6535 * Maps the specified guest memory for the given kind of access.
6536 *
6537 * This may be using bounce buffering of the memory if it's crossing a page
6538 * boundary or if there is an access handler installed for any of it. Because
6539 * of lock prefix guarantees, we're in for some extra clutter when this
6540 * happens.
6541 *
6542 * This may raise a \#GP, \#SS, \#PF or \#AC.
6543 *
6544 * @returns VBox strict status code.
6545 *
6546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6547 * @param ppvMem Where to return the pointer to the mapped memory.
6548 * @param pbUnmapInfo Where to return unmap info to be passed to
6549 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6550 * done.
6551 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6552 * 8, 12, 16, 32 or 512. When used by string operations
6553 * it can be up to a page.
6554 * @param iSegReg The index of the segment register to use for this
6555 * access. The base and limits are checked. Use UINT8_MAX
6556 * to indicate that no segmentation is required (for IDT,
6557 * GDT and LDT accesses).
6558 * @param GCPtrMem The address of the guest memory.
6559 * @param fAccess How the memory is being accessed. The
6560 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6561 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6562 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6563 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6564 * set.
6565 * @param uAlignCtl Alignment control:
6566 * - Bits 15:0 is the alignment mask.
6567 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6568 * IEM_MEMMAP_F_ALIGN_SSE, and
6569 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6570 * Pass zero to skip alignment.
6571 */
6572VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6573 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6574{
6575 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6576
6577 /*
6578 * Check the input and figure out which mapping entry to use.
6579 */
6580 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6581 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6582 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6583 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6584 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6585
6586 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6587 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6588 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6589 {
6590 iMemMap = iemMemMapFindFree(pVCpu);
6591 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6592 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6593 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6594 pVCpu->iem.s.aMemMappings[2].fAccess),
6595 VERR_IEM_IPE_9);
6596 }
6597
6598 /*
6599 * Map the memory, checking that we can actually access it. If something
6600 * slightly complicated happens, fall back on bounce buffering.
6601 */
6602 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6603 if (rcStrict == VINF_SUCCESS)
6604 { /* likely */ }
6605 else
6606 return rcStrict;
6607
6608 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6609 { /* likely */ }
6610 else
6611 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6612
6613 /*
6614 * Alignment check.
6615 */
6616 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6617 { /* likelyish */ }
6618 else
6619 {
6620 /* Misaligned access. */
6621 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6622 {
6623 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6624 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6625 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6626 {
6627 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6628
6629 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6630 { /* likely */ }
6631 else
6632 return iemRaiseAlignmentCheckException(pVCpu);
6633 }
6634 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6635 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6636 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6637 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6638 * that's what FXSAVE does on a 10980xe. */
6639 && iemMemAreAlignmentChecksEnabled(pVCpu))
6640 return iemRaiseAlignmentCheckException(pVCpu);
6641 else
6642 return iemRaiseGeneralProtectionFault0(pVCpu);
6643 }
6644
6645#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6646    /* If the access is atomic, there are host platform alignment restrictions
6647 we need to conform with. */
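    /* Example: an 8 byte atomic access whose last 4 bytes spill into the next cache
       line (GCPtrMem & 63 == 60 on AMD64, so 64 - 60 = 4 < 8) fails the check below
       and is handed back to EM as VINF_EM_EMULATE_SPLIT_LOCK. */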
6648 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6649# if defined(RT_ARCH_AMD64)
6650 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6651# elif defined(RT_ARCH_ARM64)
6652 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6653# else
6654# error port me
6655# endif
6656 )
6657 { /* okay */ }
6658 else
6659 {
6660 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6661 pVCpu->iem.s.cMisalignedAtomics += 1;
6662 return VINF_EM_EMULATE_SPLIT_LOCK;
6663 }
6664#endif
6665 }
6666
6667#ifdef IEM_WITH_DATA_TLB
6668 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6669
6670 /*
6671 * Get the TLB entry for this page and check PT flags.
6672 *
6673 * We reload the TLB entry if we need to set the dirty bit (accessed
6674 * should in theory always be set).
6675 */
6676 uint8_t *pbMem = NULL;
6677 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6678 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6679 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
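    /* The data TLB keeps entries in pairs: the even entry is tagged with the normal
       revision and the odd entry (pTlbe + 1) with the global-page revision, so both
       are probed below before declaring a miss. */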
6680 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6681 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6682 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6683 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6684 {
6685# ifdef IEM_WITH_TLB_STATISTICS
6686 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6687# endif
6688
6689 /* If the page is either supervisor only or non-writable, we need to do
6690 more careful access checks. */
6691 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6692 {
6693 /* Write to read only memory? */
6694 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6695 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6696 && ( ( IEM_GET_CPL(pVCpu) == 3
6697 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6698 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6699 {
6700 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6701 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6702 }
6703
6704 /* Kernel memory accessed by userland? */
6705 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6706 && IEM_GET_CPL(pVCpu) == 3
6707 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6708 {
6709 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6710 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6711 }
6712 }
6713
6714 /* Look up the physical page info if necessary. */
6715 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6716# ifdef IN_RING3
6717 pbMem = pTlbe->pbMappingR3;
6718# else
6719 pbMem = NULL;
6720# endif
6721 else
6722 {
6723 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6724 { /* likely */ }
6725 else
6726 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6727 pTlbe->pbMappingR3 = NULL;
6728 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6729 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6730 &pbMem, &pTlbe->fFlagsAndPhysRev);
6731 AssertRCReturn(rc, rc);
6732# ifdef IN_RING3
6733 pTlbe->pbMappingR3 = pbMem;
6734# endif
6735 }
6736 }
6737 else
6738 {
6739 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6740
6741        /* This page table walking will set A and D bits as required by the access while performing the walk.
6742           ASSUMES these are set when the address is translated rather than on commit... */
6743        /** @todo testcase: check when A and D bits are actually set by the CPU. */
6744 PGMPTWALKFAST WalkFast;
6745 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6746 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6747 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6748 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6749 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6750 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6751 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6752 fQPage |= PGMQPAGE_F_USER_MODE;
6753 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6754 if (RT_SUCCESS(rc))
6755 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6756 else
6757 {
6758 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6759# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6760 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6761 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6762# endif
6763 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6764 }
6765
6766 uint32_t fDataBps;
6767 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6768 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6769 {
6770 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6771 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6772 {
6773 pTlbe--;
6774 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6775 }
6776 else
6777 {
6778 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6779 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6780 }
6781 }
6782 else
6783 {
6784 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6785 to the page with the data access breakpoint armed on it to pass thru here. */
6786 if (fDataBps > 1)
6787 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6788 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6789 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6790 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6791 pTlbe->uTag = uTagNoRev;
6792 }
6793 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6794 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6795 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6796 pTlbe->GCPhys = GCPhysPg;
6797 pTlbe->pbMappingR3 = NULL;
6798 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6799 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6800 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6801 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6802 || IEM_GET_CPL(pVCpu) != 3
6803 || (fAccess & IEM_ACCESS_WHAT_SYS));
6804
6805 /* Resolve the physical address. */
6806 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6807 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6808 &pbMem, &pTlbe->fFlagsAndPhysRev);
6809 AssertRCReturn(rc, rc);
6810# ifdef IN_RING3
6811 pTlbe->pbMappingR3 = pbMem;
6812# endif
6813 }
6814
6815 /*
6816 * Check the physical page level access and mapping.
6817 */
6818 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6819 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6820 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6821 { /* probably likely */ }
6822 else
6823 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6824 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6825 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6826 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6827 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6828 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6829
6830 if (pbMem)
6831 {
6832 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6833 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6834 fAccess |= IEM_ACCESS_NOT_LOCKED;
6835 }
6836 else
6837 {
6838 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6839 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6840 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6841 if (rcStrict != VINF_SUCCESS)
6842 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6843 }
6844
6845 void * const pvMem = pbMem;
6846
6847 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6848 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6849 if (fAccess & IEM_ACCESS_TYPE_READ)
6850 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6851
6852#else /* !IEM_WITH_DATA_TLB */
6853
6854 RTGCPHYS GCPhysFirst;
6855 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6856 if (rcStrict != VINF_SUCCESS)
6857 return rcStrict;
6858
6859 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6860 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6861 if (fAccess & IEM_ACCESS_TYPE_READ)
6862 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6863
6864 void *pvMem;
6865 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6866 if (rcStrict != VINF_SUCCESS)
6867 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6868
6869#endif /* !IEM_WITH_DATA_TLB */
6870
6871 /*
6872 * Fill in the mapping table entry.
6873 */
6874 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6876 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6877 pVCpu->iem.s.cActiveMappings += 1;
6878
6879 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6880 *ppvMem = pvMem;
6881 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6882 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6883 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6884
6885 return VINF_SUCCESS;
6886}
6887
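/*
 * Illustrative usage sketch (a minimal example, not lifted from a specific caller):
 * a 4 byte guest write through the mapping API.  Most callers use higher level
 * fetch/store helpers built on top of this, but the sequence boils down to:
 *
 *     uint32_t    *pu32Dst;
 *     uint8_t      bUnmapInfo;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W,
 *                                       sizeof(*pu32Dst) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *     }
 */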
6888
6889/**
6890 * Commits the guest memory if bounce buffered and unmaps it.
6891 *
6892 * @returns Strict VBox status code.
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 * @param bUnmapInfo Unmap info set by iemMemMap.
6895 */
6896VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6897{
6898 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6899 AssertMsgReturn( (bUnmapInfo & 0x08)
6900 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6901 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6902 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6903 VERR_NOT_FOUND);
6904
6905 /* If it's bounce buffered, we may need to write back the buffer. */
6906 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6907 {
6908 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6909 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6910 }
6911 /* Otherwise unlock it. */
6912 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6913 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6914
6915 /* Free the entry. */
6916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6917 Assert(pVCpu->iem.s.cActiveMappings != 0);
6918 pVCpu->iem.s.cActiveMappings--;
6919 return VINF_SUCCESS;
6920}
6921
6922
6923/**
6924 * Rolls back the guest memory (conceptually only) and unmaps it.
6925 *
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 * @param bUnmapInfo Unmap info set by iemMemMap.
6928 */
6929void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6930{
6931 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6932 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6933 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6934 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6935 == ((unsigned)bUnmapInfo >> 4),
6936 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6937
6938 /* Unlock it if necessary. */
6939 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6940 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6941
6942 /* Free the entry. */
6943 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6944 Assert(pVCpu->iem.s.cActiveMappings != 0);
6945 pVCpu->iem.s.cActiveMappings--;
6946}
6947
6948#ifdef IEM_WITH_SETJMP
6949
6950/**
6951 * Maps the specified guest memory for the given kind of access, longjmp on
6952 * error.
6953 *
6954 * This may be using bounce buffering of the memory if it's crossing a page
6955 * boundary or if there is an access handler installed for any of it. Because
6956 * of lock prefix guarantees, we're in for some extra clutter when this
6957 * happens.
6958 *
6959 * This may raise a \#GP, \#SS, \#PF or \#AC.
6960 *
6961 * @returns Pointer to the mapped memory.
6962 *
6963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6964 * @param bUnmapInfo Where to return unmap info to be passed to
6965 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6966 * iemMemCommitAndUnmapWoSafeJmp,
6967 * iemMemCommitAndUnmapRoSafeJmp,
6968 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6969 * when done.
6970 * @param cbMem The number of bytes to map. This is usually 1,
6971 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6972 * string operations it can be up to a page.
6973 * @param iSegReg The index of the segment register to use for
6974 * this access. The base and limits are checked.
6975 * Use UINT8_MAX to indicate that no segmentation
6976 * is required (for IDT, GDT and LDT accesses).
6977 * @param GCPtrMem The address of the guest memory.
6978 * @param fAccess How the memory is being accessed. The
6979 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6980 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6981 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6982 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6983 * set.
6984 * @param uAlignCtl Alignment control:
6985 * - Bits 15:0 is the alignment mask.
6986 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6987 * IEM_MEMMAP_F_ALIGN_SSE, and
6988 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6989 * Pass zero to skip alignment.
6990 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
6991 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
6992 * needs counting as such in the statistics.
6993 */
6994template<bool a_fSafeCall = false>
6995static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6996 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6997{
6998 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
6999
7000 /*
7001 * Check the input, check segment access and adjust address
7002 * with segment base.
7003 */
7004 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7005 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7006 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7007
7008 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7009 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7010 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7011
7012 /*
7013 * Alignment check.
7014 */
7015 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7016 { /* likelyish */ }
7017 else
7018 {
7019 /* Misaligned access. */
7020 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7021 {
7022 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7023 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7024 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7025 {
7026 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7027
7028 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7029 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7030 }
7031 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7032 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7033 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7034 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7035 * that's what FXSAVE does on a 10980xe. */
7036 && iemMemAreAlignmentChecksEnabled(pVCpu))
7037 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7038 else
7039 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7040 }
7041
7042#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7043        /* If the access is atomic, there are host platform alignment restrictions
7044 we need to conform with. */
7045 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7046# if defined(RT_ARCH_AMD64)
7047 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7048# elif defined(RT_ARCH_ARM64)
7049 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7050# else
7051# error port me
7052# endif
7053 )
7054 { /* okay */ }
7055 else
7056 {
7057 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7058 pVCpu->iem.s.cMisalignedAtomics += 1;
7059 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7060 }
7061#endif
7062 }
7063
7064 /*
7065 * Figure out which mapping entry to use.
7066 */
7067 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7068 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7069 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7070 {
7071 iMemMap = iemMemMapFindFree(pVCpu);
7072 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7073 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7074 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7075 pVCpu->iem.s.aMemMappings[2].fAccess),
7076 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7077 }
7078
7079 /*
7080 * Crossing a page boundary?
7081 */
7082 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7083 { /* No (likely). */ }
7084 else
7085 {
7086 void *pvMem;
7087 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7088 if (rcStrict == VINF_SUCCESS)
7089 return pvMem;
7090 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7091 }
7092
7093#ifdef IEM_WITH_DATA_TLB
7094 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7095
7096 /*
7097 * Get the TLB entry for this page checking that it has the A & D bits
7098 * set as per fAccess flags.
7099 */
7100 /** @todo make the caller pass these in with fAccess. */
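    /* These masks pre-compute the TLB-entry flags that would make this particular
       access fail (or require an A/D bit update), so the tag compare below and the
       later physical revision check collapse into simple AND + compare operations. */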
7101 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7102 ? IEMTLBE_F_PT_NO_USER : 0;
7103 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7104 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7105 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7106 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7107 ? IEMTLBE_F_PT_NO_WRITE : 0)
7108 : 0;
7109 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7110 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7111 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7112 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7113 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7114 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7115 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7116 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7117 {
7118# ifdef IEM_WITH_TLB_STATISTICS
7119 if (a_fSafeCall)
7120 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7121 else
7122 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7123# endif
7124 }
7125 else
7126 {
7127 if (a_fSafeCall)
7128 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7129 else
7130 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7131
7132 /* This page table walking will set A and D bits as required by the
7133 access while performing the walk.
7134 ASSUMES these are set when the address is translated rather than on commit... */
7135 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7136 PGMPTWALKFAST WalkFast;
7137 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7138 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7139 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7140 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7141 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7142 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7143 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7144 fQPage |= PGMQPAGE_F_USER_MODE;
7145 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7146 if (RT_SUCCESS(rc))
7147 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7148 else
7149 {
7150 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7151# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7152 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7153                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7154# endif
7155 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7156 }
7157
7158 uint32_t fDataBps;
7159 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7160 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7161 {
7162 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7163 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7164 {
7165 pTlbe--;
7166 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7167 }
7168 else
7169 {
7170 if (a_fSafeCall)
7171 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7172 else
7173 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7174 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7175 }
7176 }
7177 else
7178 {
7179 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7180 to the page with the data access breakpoint armed on it to pass thru here. */
7181 if (fDataBps > 1)
7182 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7183 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7184 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7185 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7186 pTlbe->uTag = uTagNoRev;
7187 }
7188 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7189 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7190 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7191 pTlbe->GCPhys = GCPhysPg;
7192 pTlbe->pbMappingR3 = NULL;
7193 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7194 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7195 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7196
7197 /* Resolve the physical address. */
7198 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7199 uint8_t *pbMemFullLoad = NULL;
7200 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7201 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7202 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7203# ifdef IN_RING3
7204 pTlbe->pbMappingR3 = pbMemFullLoad;
7205# endif
7206 }
7207
7208 /*
7209 * Check the flags and physical revision.
7210 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7211 * just to keep the code structure simple (i.e. avoid gotos or similar).
7212 */
7213 uint8_t *pbMem;
7214 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7215 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7216# ifdef IN_RING3
7217 pbMem = pTlbe->pbMappingR3;
7218# else
7219 pbMem = NULL;
7220# endif
7221 else
7222 {
7223 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7224
7225 /*
7226 * Okay, something isn't quite right or needs refreshing.
7227 */
7228 /* Write to read only memory? */
7229 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7230 {
7231 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7232# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7233/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7234 * to trigger an \#PG or a VM nested paging exit here yet! */
7235 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7236 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7237# endif
7238 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7239 }
7240
7241 /* Kernel memory accessed by userland? */
7242 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7243 {
7244 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7245# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7246/** @todo TLB: See above. */
7247 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7248 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7249# endif
7250 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7251 }
7252
7253 /*
7254 * Check if the physical page info needs updating.
7255 */
7256 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7257# ifdef IN_RING3
7258 pbMem = pTlbe->pbMappingR3;
7259# else
7260 pbMem = NULL;
7261# endif
7262 else
7263 {
7264 pTlbe->pbMappingR3 = NULL;
7265 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7266 pbMem = NULL;
7267 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7268 &pbMem, &pTlbe->fFlagsAndPhysRev);
7269 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7270# ifdef IN_RING3
7271 pTlbe->pbMappingR3 = pbMem;
7272# endif
7273 }
7274
7275 /*
7276 * Check the physical page level access and mapping.
7277 */
7278 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7279 { /* probably likely */ }
7280 else
7281 {
7282 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7283 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7284 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7285 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7286 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7287 if (rcStrict == VINF_SUCCESS)
7288 return pbMem;
7289 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7290 }
7291 }
7292 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7293
7294 if (pbMem)
7295 {
7296 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7297 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7298 fAccess |= IEM_ACCESS_NOT_LOCKED;
7299 }
7300 else
7301 {
7302 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7303 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7304 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7305 if (rcStrict == VINF_SUCCESS)
7306 {
7307 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7308 return pbMem;
7309 }
7310 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7311 }
7312
7313 void * const pvMem = pbMem;
7314
7315 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7316 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7317 if (fAccess & IEM_ACCESS_TYPE_READ)
7318 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7319
7320#else /* !IEM_WITH_DATA_TLB */
7321
7322
7323 RTGCPHYS GCPhysFirst;
7324 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7325 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7326 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7327
7328 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7329 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7330 if (fAccess & IEM_ACCESS_TYPE_READ)
7331 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7332
7333 void *pvMem;
7334 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7335 if (rcStrict == VINF_SUCCESS)
7336 { /* likely */ }
7337 else
7338 {
7339 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7340 if (rcStrict == VINF_SUCCESS)
7341 return pvMem;
7342 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7343 }
7344
7345#endif /* !IEM_WITH_DATA_TLB */
7346
7347 /*
7348 * Fill in the mapping table entry.
7349 */
7350 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7351 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7352 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7353 pVCpu->iem.s.cActiveMappings++;
7354
7355 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7356
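 /* Pack the unmap info: the aMemMappings[] slot index in the low bits, 0x08 as
    a validity marker, and the IEM_ACCESS_TYPE_MASK portion of fAccess shifted
    up by four bits (iemMemCommitAndUnmapJmp asserts this same encoding). */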
7357 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7358 return pvMem;
7359}
7360
7361
7362/** @see iemMemMapJmp */
7363static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7364 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7365{
7366 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7367}
7368
7369
7370/**
7371 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7372 *
7373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7374 * @param bUnmapInfo Unmap info set by iemMemMapJmp and friends for
7375 * identifying the mapping to commit and unmap.
7376 */
7377void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7378{
7379 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7380 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7381 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7382 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7383 == ((unsigned)bUnmapInfo >> 4),
7384 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7385
7386 /* If it's bounce buffered, we may need to write back the buffer. */
7387 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7388 {
7389 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7390 {
7391 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7392 if (rcStrict == VINF_SUCCESS)
7393 return;
7394 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7395 }
7396 }
7397 /* Otherwise unlock it. */
7398 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7399 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7400
7401 /* Free the entry. */
7402 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7403 Assert(pVCpu->iem.s.cActiveMappings != 0);
7404 pVCpu->iem.s.cActiveMappings--;
7405}
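/*
 * Note: the bUnmapInfo byte consumed above packs three pieces of information,
 * mirroring the "iMemMap | 0x08 | (type << 4)" encoding produced by the
 * mapping code:
 *   - bits 2:0 - the aMemMappings[] slot index (iMemMap),
 *   - bit 3    - always set (0x08), marking the value as valid,
 *   - bits 7:4 - the IEM_ACCESS_TYPE_MASK portion of fAccess.
 * A minimal decode sketch, with illustrative variable names only:
 * @code
 *     uintptr_t const iMemMap     = bUnmapInfo & 0x7;
 *     bool      const fValid      = RT_BOOL(bUnmapInfo & 0x08);
 *     uint32_t  const fAccessType = (uint32_t)bUnmapInfo >> 4;
 * @endcode
 */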
7406
7407
7408/** Fallback for iemMemCommitAndUnmapRwJmp. */
7409void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7410{
7411 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7412 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7413}
7414
7415
7416/** Fallback for iemMemCommitAndUnmapAtJmp. */
7417void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7418{
7419 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7420 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7421}
7422
7423
7424/** Fallback for iemMemCommitAndUnmapWoJmp. */
7425void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7426{
7427 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7428 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7429}
7430
7431
7432/** Fallback for iemMemCommitAndUnmapRoJmp. */
7433void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7434{
7435 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7436 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7437}
7438
7439
7440/** Fallback for iemMemRollbackAndUnmapWo. */
7441void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7442{
7443 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7444 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7445}
7446
7447#endif /* IEM_WITH_SETJMP */
7448
7449#ifndef IN_RING3
7450/**
7451 * Commits the guest memory if bounce buffered and unmaps it; if any part of
7452 * the bounce buffered write runs into trouble, it is postponed to ring-3 (sets FF and stuff).
7453 *
7454 * Allows the instruction to be completed and retired, while the IEM user will
7455 * return to ring-3 immediately afterwards and do the postponed writes there.
7456 *
7457 * @returns VBox status code (no strict statuses). Caller must check
7458 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7460 * @param bUnmapInfo Unmap info set by the iemMemMap call that created
7461 * the mapping.
7462 */
7463VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7464{
7465 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7466 AssertMsgReturn( (bUnmapInfo & 0x08)
7467 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7468 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7469 == ((unsigned)bUnmapInfo >> 4),
7470 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7471 VERR_NOT_FOUND);
7472
7473 /* If it's bounce buffered, we may need to write back the buffer. */
7474 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7475 {
7476 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7477 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7478 }
7479 /* Otherwise unlock it. */
7480 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7481 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7482
7483 /* Free the entry. */
7484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7485 Assert(pVCpu->iem.s.cActiveMappings != 0);
7486 pVCpu->iem.s.cActiveMappings--;
7487 return VINF_SUCCESS;
7488}
7489#endif
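/*
 * Hedged usage sketch for the non-ring-3 helper above: a string instruction
 * loop that used it is expected to check the force flag before iterating
 * further, roughly along these lines (illustrative only):
 * @code
 *     rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         return rcStrict; // go to ring-3 and do the postponed writes there
 * @endcode
 */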
7490
7491
7492/**
7493 * Rolls back mappings, releasing page locks and such.
7494 *
7495 * The caller shall only call this after checking cActiveMappings.
7496 *
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 */
7499void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7500{
7501 Assert(pVCpu->iem.s.cActiveMappings > 0);
7502
7503 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7504 while (iMemMap-- > 0)
7505 {
7506 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7507 if (fAccess != IEM_ACCESS_INVALID)
7508 {
7509 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7510 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7511 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7512 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7513 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7514 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7515 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7517 pVCpu->iem.s.cActiveMappings--;
7518 }
7519 }
7520}
7521
7522
7523/*
7524 * Instantiate R/W templates.
7525 */
7526#define TMPL_MEM_WITH_STACK
7527
7528#define TMPL_MEM_TYPE uint8_t
7529#define TMPL_MEM_FN_SUFF U8
7530#define TMPL_MEM_FMT_TYPE "%#04x"
7531#define TMPL_MEM_FMT_DESC "byte"
7532#include "IEMAllMemRWTmpl.cpp.h"
7533
7534#define TMPL_MEM_TYPE uint16_t
7535#define TMPL_MEM_FN_SUFF U16
7536#define TMPL_MEM_FMT_TYPE "%#06x"
7537#define TMPL_MEM_FMT_DESC "word"
7538#include "IEMAllMemRWTmpl.cpp.h"
7539
7540#define TMPL_WITH_PUSH_SREG
7541#define TMPL_MEM_TYPE uint32_t
7542#define TMPL_MEM_FN_SUFF U32
7543#define TMPL_MEM_FMT_TYPE "%#010x"
7544#define TMPL_MEM_FMT_DESC "dword"
7545#include "IEMAllMemRWTmpl.cpp.h"
7546#undef TMPL_WITH_PUSH_SREG
7547
7548#define TMPL_MEM_TYPE uint64_t
7549#define TMPL_MEM_FN_SUFF U64
7550#define TMPL_MEM_FMT_TYPE "%#018RX64"
7551#define TMPL_MEM_FMT_DESC "qword"
7552#include "IEMAllMemRWTmpl.cpp.h"
7553
7554#undef TMPL_MEM_WITH_STACK
7555
7556#define TMPL_MEM_TYPE uint64_t
7557#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7558#define TMPL_MEM_FN_SUFF U64AlignedU128
7559#define TMPL_MEM_FMT_TYPE "%#018RX64"
7560#define TMPL_MEM_FMT_DESC "qword"
7561#include "IEMAllMemRWTmpl.cpp.h"
7562
7563/* See IEMAllMemRWTmplInline.cpp.h */
7564#define TMPL_MEM_BY_REF
7565
7566#define TMPL_MEM_TYPE RTFLOAT80U
7567#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7568#define TMPL_MEM_FN_SUFF R80
7569#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7570#define TMPL_MEM_FMT_DESC "tword"
7571#include "IEMAllMemRWTmpl.cpp.h"
7572
7573#define TMPL_MEM_TYPE RTPBCD80U
7574#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7575#define TMPL_MEM_FN_SUFF D80
7576#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7577#define TMPL_MEM_FMT_DESC "tword"
7578#include "IEMAllMemRWTmpl.cpp.h"
7579
7580#define TMPL_MEM_TYPE RTUINT128U
7581#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7582#define TMPL_MEM_FN_SUFF U128
7583#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7584#define TMPL_MEM_FMT_DESC "dqword"
7585#include "IEMAllMemRWTmpl.cpp.h"
7586
7587#define TMPL_MEM_TYPE RTUINT128U
7588#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7589#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7590#define TMPL_MEM_FN_SUFF U128AlignedSse
7591#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7592#define TMPL_MEM_FMT_DESC "dqword"
7593#include "IEMAllMemRWTmpl.cpp.h"
7594
7595#define TMPL_MEM_TYPE RTUINT128U
7596#define TMPL_MEM_TYPE_ALIGN 0
7597#define TMPL_MEM_FN_SUFF U128NoAc
7598#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7599#define TMPL_MEM_FMT_DESC "dqword"
7600#include "IEMAllMemRWTmpl.cpp.h"
7601
7602#define TMPL_MEM_TYPE RTUINT256U
7603#define TMPL_MEM_TYPE_ALIGN 0
7604#define TMPL_MEM_FN_SUFF U256NoAc
7605#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7606#define TMPL_MEM_FMT_DESC "qqword"
7607#include "IEMAllMemRWTmpl.cpp.h"
7608
7609#define TMPL_MEM_TYPE RTUINT256U
7610#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7611#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7612#define TMPL_MEM_FN_SUFF U256AlignedAvx
7613#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7614#define TMPL_MEM_FMT_DESC "qqword"
7615#include "IEMAllMemRWTmpl.cpp.h"
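/*
 * Note: each inclusion of IEMAllMemRWTmpl.cpp.h above presumably expands into
 * the fetch/store (and, with TMPL_MEM_WITH_STACK, stack push/pop) helpers for
 * the given TMPL_MEM_TYPE/TMPL_MEM_FN_SUFF combination; e.g. the U16/U32/U64
 * instantiations supply the iemMemFetchDataU16/U32/U64 and
 * iemMemStoreDataU16/U32/U64 helpers used further down in this file.
 */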
7616
7617/**
7618 * Fetches a data dword and zero extends it to a qword.
7619 *
7620 * @returns Strict VBox status code.
7621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7622 * @param pu64Dst Where to return the qword.
7623 * @param iSegReg The index of the segment register to use for
7624 * this access. The base and limits are checked.
7625 * @param GCPtrMem The address of the guest memory.
7626 */
7627VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7628{
7629 /* The lazy approach for now... */
7630 uint8_t bUnmapInfo;
7631 uint32_t const *pu32Src;
7632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7633 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7634 if (rc == VINF_SUCCESS)
7635 {
7636 *pu64Dst = *pu32Src;
7637 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7638 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7639 }
7640 return rc;
7641}
7642
7643
7644#ifdef SOME_UNUSED_FUNCTION
7645/**
7646 * Fetches a data dword and sign extends it to a qword.
7647 *
7648 * @returns Strict VBox status code.
7649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7650 * @param pu64Dst Where to return the sign extended value.
7651 * @param iSegReg The index of the segment register to use for
7652 * this access. The base and limits are checked.
7653 * @param GCPtrMem The address of the guest memory.
7654 */
7655VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7656{
7657 /* The lazy approach for now... */
7658 uint8_t bUnmapInfo;
7659 int32_t const *pi32Src;
7660 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7661 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7662 if (rc == VINF_SUCCESS)
7663 {
7664 *pu64Dst = *pi32Src;
7665 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7666 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7667 }
7668#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7669 else
7670 *pu64Dst = 0;
7671#endif
7672 return rc;
7673}
7674#endif
7675
7676
7677/**
7678 * Fetches a descriptor register (lgdt, lidt).
7679 *
7680 * @returns Strict VBox status code.
7681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7682 * @param pcbLimit Where to return the limit.
7683 * @param pGCPtrBase Where to return the base.
7684 * @param iSegReg The index of the segment register to use for
7685 * this access. The base and limits are checked.
7686 * @param GCPtrMem The address of the guest memory.
7687 * @param enmOpSize The effective operand size.
7688 */
7689VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7690 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7691{
7692 /*
7693 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7694 * little special:
7695 * - The two reads are done separately.
7696 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7697 * - We suspect the 386 to actually commit the limit before the base in
7698 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7699 * don't try to emulate this eccentric behavior, because it's not well
7700 * enough understood and rather hard to trigger.
7701 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7702 */
7703 VBOXSTRICTRC rcStrict;
7704 if (IEM_IS_64BIT_CODE(pVCpu))
7705 {
7706 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7707 if (rcStrict == VINF_SUCCESS)
7708 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7709 }
7710 else
7711 {
7712 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7713 if (enmOpSize == IEMMODE_32BIT)
7714 {
7715 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7716 {
7717 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7718 if (rcStrict == VINF_SUCCESS)
7719 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7720 }
7721 else
7722 {
7723 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7724 if (rcStrict == VINF_SUCCESS)
7725 {
7726 *pcbLimit = (uint16_t)uTmp;
7727 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7728 }
7729 }
7730 if (rcStrict == VINF_SUCCESS)
7731 *pGCPtrBase = uTmp;
7732 }
7733 else
7734 {
7735 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7736 if (rcStrict == VINF_SUCCESS)
7737 {
7738 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7739 if (rcStrict == VINF_SUCCESS)
7740 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7741 }
7742 }
7743 }
7744 return rcStrict;
7745}
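/*
 * For reference, the pseudo-descriptor layout read above, as implied by the
 * fetches in iemMemFetchDataXdtr:
 *   - offset 0: 16-bit limit,
 *   - offset 2: base - 24 bits used with a 16-bit operand size, 32 bits with
 *     a 32-bit operand size, and 64 bits in 64-bit code.
 */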
7746
7747
7748/**
7749 * Stores a data dqword, SSE aligned.
7750 *
7751 * @returns Strict VBox status code.
7752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7753 * @param iSegReg The index of the segment register to use for
7754 * this access. The base and limits are checked.
7755 * @param GCPtrMem The address of the guest memory.
7756 * @param u128Value The value to store.
7757 */
7758VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7759{
7760 /* The lazy approach for now... */
7761 uint8_t bUnmapInfo;
7762 PRTUINT128U pu128Dst;
7763 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7764 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7765 if (rc == VINF_SUCCESS)
7766 {
7767 pu128Dst->au64[0] = u128Value.au64[0];
7768 pu128Dst->au64[1] = u128Value.au64[1];
7769 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7770 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7771 }
7772 return rc;
7773}
7774
7775
7776#ifdef IEM_WITH_SETJMP
7777/**
7778 * Stores a data dqword, SSE aligned.
7779 *
7780 * @returns Strict VBox status code.
7781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7782 * @param iSegReg The index of the segment register to use for
7783 * this access. The base and limits are checked.
7784 * @param GCPtrMem The address of the guest memory.
7785 * @param u128Value The value to store.
7786 */
7787void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7788 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7789{
7790 /* The lazy approach for now... */
7791 uint8_t bUnmapInfo;
7792 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7793 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7794 pu128Dst->au64[0] = u128Value.au64[0];
7795 pu128Dst->au64[1] = u128Value.au64[1];
7796 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7797 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7798}
7799#endif
7800
7801
7802/**
7803 * Stores a data qqword.
7804 *
7805 * @returns Strict VBox status code.
7806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7807 * @param iSegReg The index of the segment register to use for
7808 * this access. The base and limits are checked.
7809 * @param GCPtrMem The address of the guest memory.
7810 * @param pu256Value Pointer to the value to store.
7811 */
7812VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7813{
7814 /* The lazy approach for now... */
7815 uint8_t bUnmapInfo;
7816 PRTUINT256U pu256Dst;
7817 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7818 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7819 if (rc == VINF_SUCCESS)
7820 {
7821 pu256Dst->au64[0] = pu256Value->au64[0];
7822 pu256Dst->au64[1] = pu256Value->au64[1];
7823 pu256Dst->au64[2] = pu256Value->au64[2];
7824 pu256Dst->au64[3] = pu256Value->au64[3];
7825 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7826 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7827 }
7828 return rc;
7829}
7830
7831
7832#ifdef IEM_WITH_SETJMP
7833/**
7834 * Stores a data qqword, longjmp on error.
7835 *
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param iSegReg The index of the segment register to use for
7838 * this access. The base and limits are checked.
7839 * @param GCPtrMem The address of the guest memory.
7840 * @param pu256Value Pointer to the value to store.
7841 */
7842void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7843{
7844 /* The lazy approach for now... */
7845 uint8_t bUnmapInfo;
7846 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7847 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7848 pu256Dst->au64[0] = pu256Value->au64[0];
7849 pu256Dst->au64[1] = pu256Value->au64[1];
7850 pu256Dst->au64[2] = pu256Value->au64[2];
7851 pu256Dst->au64[3] = pu256Value->au64[3];
7852 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7853 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7854}
7855#endif
7856
7857
7858/**
7859 * Stores a descriptor register (sgdt, sidt).
7860 *
7861 * @returns Strict VBox status code.
7862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7863 * @param cbLimit The limit.
7864 * @param GCPtrBase The base address.
7865 * @param iSegReg The index of the segment register to use for
7866 * this access. The base and limits are checked.
7867 * @param GCPtrMem The address of the guest memory.
7868 */
7869VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7870{
7871 /*
7872 * The SIDT and SGDT instructions actually store the data using two
7873 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7874 * do not respond to opsize prefixes.
7875 */
7876 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7877 if (rcStrict == VINF_SUCCESS)
7878 {
7879 if (IEM_IS_16BIT_CODE(pVCpu))
7880 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7881 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7882 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7883 else if (IEM_IS_32BIT_CODE(pVCpu))
7884 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7885 else
7886 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7887 }
7888 return rcStrict;
7889}
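/*
 * Note the 16-bit case above: for target CPUs up to and including the 286 the
 * unused top byte of the stored base is forced to 0xFF instead of being taken
 * from bits 31:24 of the base.
 */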
7890
7891
7892/**
7893 * Begin a special stack push (used by interrupts, exceptions and such).
7894 *
7895 * This will raise \#SS or \#PF if appropriate.
7896 *
7897 * @returns Strict VBox status code.
7898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7899 * @param cbMem The number of bytes to push onto the stack.
7900 * @param cbAlign The alignment mask (7, 3, 1).
7901 * @param ppvMem Where to return the pointer to the stack memory.
7902 * As with the other memory functions this could be
7903 * direct access or bounce buffered access, so
7904 * don't commit register changes until the commit call
7905 * succeeds.
7906 * @param pbUnmapInfo Where to store unmap info for
7907 * iemMemStackPushCommitSpecial.
7908 * @param puNewRsp Where to return the new RSP value. This must be
7909 * passed unchanged to
7910 * iemMemStackPushCommitSpecial().
7911 */
7912VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7913 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7914{
7915 Assert(cbMem < UINT8_MAX);
7916 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7917 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7918}
7919
7920
7921/**
7922 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7923 *
7924 * This will update the rSP.
7925 *
7926 * @returns Strict VBox status code.
7927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7928 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7929 * @param uNewRsp The new RSP value returned by
7930 * iemMemStackPushBeginSpecial().
7931 */
7932VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7933{
7934 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7935 if (rcStrict == VINF_SUCCESS)
7936 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7937 return rcStrict;
7938}
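/*
 * Typical usage of the special push API above - a minimal sketch only; the
 * actual cbMem and alignment mask depend on the caller (e.g. exception
 * dispatch code):
 * @code
 *     uint64_t     uNewRsp;
 *     uint8_t      bUnmapInfo;
 *     void        *pvStackFrame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvStackFrame, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... write the frame via pvStackFrame, don't touch guest registers yet ...
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits RSP on success
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 * @endcode
 */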
7939
7940
7941/**
7942 * Begin a special stack pop (used by iret, retf and such).
7943 *
7944 * This will raise \#SS or \#PF if appropriate.
7945 *
7946 * @returns Strict VBox status code.
7947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7948 * @param cbMem The number of bytes to pop from the stack.
7949 * @param cbAlign The alignment mask (7, 3, 1).
7950 * @param ppvMem Where to return the pointer to the stack memory.
7951 * @param pbUnmapInfo Where to store unmap info for
7952 * iemMemStackPopDoneSpecial.
7953 * @param puNewRsp Where to return the new RSP value. This must be
7954 * assigned to CPUMCTX::rsp manually some time
7955 * after iemMemStackPopDoneSpecial() has been
7956 * called.
7957 */
7958VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7959 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7960{
7961 Assert(cbMem < UINT8_MAX);
7962 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7963 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7964}
7965
7966
7967/**
7968 * Continue a special stack pop (used by iret and retf), for the purpose of
7969 * retrieving a new stack pointer.
7970 *
7971 * This will raise \#SS or \#PF if appropriate.
7972 *
7973 * @returns Strict VBox status code.
7974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7975 * @param off Offset from the top of the stack. This is zero
7976 * except in the retf case.
7977 * @param cbMem The number of bytes to pop from the stack.
7978 * @param ppvMem Where to return the pointer to the stack memory.
7979 * @param pbUnmapInfo Where to store unmap info for
7980 * iemMemStackPopDoneSpecial.
7981 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7982 * return this because all use of this function is
7983 * to retrieve a new value and anything we return
7984 * here would be discarded.)
7985 */
7986VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7987 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7988{
7989 Assert(cbMem < UINT8_MAX);
7990
7991 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7992 RTGCPTR GCPtrTop;
7993 if (IEM_IS_64BIT_CODE(pVCpu))
7994 GCPtrTop = uCurNewRsp;
7995 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7996 GCPtrTop = (uint32_t)uCurNewRsp;
7997 else
7998 GCPtrTop = (uint16_t)uCurNewRsp;
7999
8000 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8001 0 /* checked in iemMemStackPopBeginSpecial */);
8002}
8003
8004
8005/**
8006 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8007 * iemMemStackPopContinueSpecial).
8008 *
8009 * The caller will manually commit the rSP.
8010 *
8011 * @returns Strict VBox status code.
8012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8013 * @param bUnmapInfo Unmap information returned by
8014 * iemMemStackPopBeginSpecial() or
8015 * iemMemStackPopContinueSpecial().
8016 */
8017VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8018{
8019 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8020}
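/*
 * Typical usage of the special pop API above - again only a sketch; real
 * callers (iret, retf) pick cbMem themselves and commit RSP manually:
 * @code
 *     uint64_t     uNewRsp;
 *     uint8_t      bUnmapInfo;
 *     void const  *pvStackFrame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvStackFrame, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint64_t const uPopped = *(uint64_t const *)pvStackFrame; // ... consume the popped value ...
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp; // the caller commits RSP manually
 * @endcode
 */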
8021
8022
8023/**
8024 * Fetches a system table byte.
8025 *
8026 * @returns Strict VBox status code.
8027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8028 * @param pbDst Where to return the byte.
8029 * @param iSegReg The index of the segment register to use for
8030 * this access. The base and limits are checked.
8031 * @param GCPtrMem The address of the guest memory.
8032 */
8033VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8034{
8035 /* The lazy approach for now... */
8036 uint8_t bUnmapInfo;
8037 uint8_t const *pbSrc;
8038 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8039 if (rc == VINF_SUCCESS)
8040 {
8041 *pbDst = *pbSrc;
8042 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8043 }
8044 return rc;
8045}
8046
8047
8048/**
8049 * Fetches a system table word.
8050 *
8051 * @returns Strict VBox status code.
8052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8053 * @param pu16Dst Where to return the word.
8054 * @param iSegReg The index of the segment register to use for
8055 * this access. The base and limits are checked.
8056 * @param GCPtrMem The address of the guest memory.
8057 */
8058VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8059{
8060 /* The lazy approach for now... */
8061 uint8_t bUnmapInfo;
8062 uint16_t const *pu16Src;
8063 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8064 if (rc == VINF_SUCCESS)
8065 {
8066 *pu16Dst = *pu16Src;
8067 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8068 }
8069 return rc;
8070}
8071
8072
8073/**
8074 * Fetches a system table dword.
8075 *
8076 * @returns Strict VBox status code.
8077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8078 * @param pu32Dst Where to return the dword.
8079 * @param iSegReg The index of the segment register to use for
8080 * this access. The base and limits are checked.
8081 * @param GCPtrMem The address of the guest memory.
8082 */
8083VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8084{
8085 /* The lazy approach for now... */
8086 uint8_t bUnmapInfo;
8087 uint32_t const *pu32Src;
8088 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8089 if (rc == VINF_SUCCESS)
8090 {
8091 *pu32Dst = *pu32Src;
8092 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8093 }
8094 return rc;
8095}
8096
8097
8098/**
8099 * Fetches a system table qword.
8100 *
8101 * @returns Strict VBox status code.
8102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8103 * @param pu64Dst Where to return the qword.
8104 * @param iSegReg The index of the segment register to use for
8105 * this access. The base and limits are checked.
8106 * @param GCPtrMem The address of the guest memory.
8107 */
8108VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8109{
8110 /* The lazy approach for now... */
8111 uint8_t bUnmapInfo;
8112 uint64_t const *pu64Src;
8113 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8114 if (rc == VINF_SUCCESS)
8115 {
8116 *pu64Dst = *pu64Src;
8117 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8118 }
8119 return rc;
8120}
8121
8122
8123/**
8124 * Fetches a descriptor table entry with caller specified error code.
8125 *
8126 * @returns Strict VBox status code.
8127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8128 * @param pDesc Where to return the descriptor table entry.
8129 * @param uSel The selector which table entry to fetch.
8130 * @param uXcpt The exception to raise on table lookup error.
8131 * @param uErrorCode The error code associated with the exception.
8132 */
8133static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8134 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8135{
8136 AssertPtr(pDesc);
8137 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8138
8139 /** @todo did the 286 require all 8 bytes to be accessible? */
8140 /*
8141 * Get the selector table base and check bounds.
8142 */
8143 RTGCPTR GCPtrBase;
8144 if (uSel & X86_SEL_LDT)
8145 {
8146 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8147 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8148 {
8149 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8150 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8151 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8152 uErrorCode, 0);
8153 }
8154
8155 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8156 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8157 }
8158 else
8159 {
8160 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8161 {
8162 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8163 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8164 uErrorCode, 0);
8165 }
8166 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8167 }
8168
8169 /*
8170 * Read the legacy descriptor and maybe the long mode extensions if
8171 * required.
8172 */
8173 VBOXSTRICTRC rcStrict;
8174 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8175 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8176 else
8177 {
8178 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8179 if (rcStrict == VINF_SUCCESS)
8180 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8181 if (rcStrict == VINF_SUCCESS)
8182 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8183 if (rcStrict == VINF_SUCCESS)
8184 pDesc->Legacy.au16[3] = 0;
8185 else
8186 return rcStrict;
8187 }
8188
8189 if (rcStrict == VINF_SUCCESS)
8190 {
8191 if ( !IEM_IS_LONG_MODE(pVCpu)
8192 || pDesc->Legacy.Gen.u1DescType)
8193 pDesc->Long.au64[1] = 0;
8194 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8195 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8196 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8197 else
8198 {
8199 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8200 /** @todo is this the right exception? */
8201 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8202 }
8203 }
8204 return rcStrict;
8205}
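/*
 * Selector layout recap for the table lookups above (architectural): bits 1:0
 * are the RPL, bit 2 is the table indicator (set = LDT, clear = GDT), and
 * bits 15:3 form the descriptor index, so uSel & X86_SEL_MASK yields the byte
 * offset of the 8-byte descriptor within the selected table.
 */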
8206
8207
8208/**
8209 * Fetches a descriptor table entry.
8210 *
8211 * @returns Strict VBox status code.
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 * @param pDesc Where to return the descriptor table entry.
8214 * @param uSel The selector which table entry to fetch.
8215 * @param uXcpt The exception to raise on table lookup error.
8216 */
8217VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8218{
8219 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8220}
8221
8222
8223/**
8224 * Marks the selector descriptor as accessed (only non-system descriptors).
8225 *
8226 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8227 * will therefore skip the limit checks.
8228 *
8229 * @returns Strict VBox status code.
8230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8231 * @param uSel The selector.
8232 */
8233VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8234{
8235 /*
8236 * Get the selector table base and calculate the entry address.
8237 */
8238 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8239 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8240 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8241 GCPtr += uSel & X86_SEL_MASK;
8242
8243 /*
8244 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8245 * ugly stuff to avoid this. This will make sure it's an atomic access
8246 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8247 */
8248 VBOXSTRICTRC rcStrict;
8249 uint8_t bUnmapInfo;
8250 uint32_t volatile *pu32;
8251 if ((GCPtr & 3) == 0)
8252 {
8253 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8254 GCPtr += 2 + 2;
8255 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8256 if (rcStrict != VINF_SUCCESS)
8257 return rcStrict;
8258 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8259 }
8260 else
8261 {
8262 /* The misaligned GDT/LDT case, map the whole thing. */
8263 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8264 if (rcStrict != VINF_SUCCESS)
8265 return rcStrict;
8266 switch ((uintptr_t)pu32 & 3)
8267 {
8268 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8269 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8270 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8271 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8272 }
8273 }
8274
8275 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8276}
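/*
 * A note on the bit numbers used above: the accessed bit is bit 40 of the
 * 8-byte descriptor, i.e. bit 0 of byte 5. In the aligned case the dword at
 * descriptor offset 4 is mapped, which puts that byte at offset 1 of the
 * mapping and the bit at position 8. In the misaligned case the whole
 * descriptor is mapped and the bit position is adjusted for the mapping
 * pointer's own misalignment (the 40 - 8*n terms).
 */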
8277
8278
8279#undef LOG_GROUP
8280#define LOG_GROUP LOG_GROUP_IEM
8281
8282/** @} */
8283
8284/** @name Opcode Helpers.
8285 * @{
8286 */
8287
8288/**
8289 * Calculates the effective address of a ModR/M memory operand.
8290 *
8291 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8292 *
8293 * @return Strict VBox status code.
8294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8295 * @param bRm The ModRM byte.
8296 * @param cbImmAndRspOffset - First byte: The size of any immediate
8297 * following the effective address opcode bytes
8298 * (only for RIP relative addressing).
8299 * - Second byte: RSP displacement (for POP [ESP]).
8300 * @param pGCPtrEff Where to return the effective address.
8301 */
8302VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8303{
8304 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8305# define SET_SS_DEF() \
8306 do \
8307 { \
8308 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8309 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8310 } while (0)
8311
8312 if (!IEM_IS_64BIT_CODE(pVCpu))
8313 {
8314/** @todo Check the effective address size crap! */
8315 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8316 {
8317 uint16_t u16EffAddr;
8318
8319 /* Handle the disp16 form with no registers first. */
8320 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8321 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8322 else
8323 {
8324 /* Get the displacement. */
8325 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8326 {
8327 case 0: u16EffAddr = 0; break;
8328 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8329 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8330 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8331 }
8332
8333 /* Add the base and index registers to the disp. */
8334 switch (bRm & X86_MODRM_RM_MASK)
8335 {
8336 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8337 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8338 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8339 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8340 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8341 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8342 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8343 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8344 }
8345 }
8346
8347 *pGCPtrEff = u16EffAddr;
8348 }
8349 else
8350 {
8351 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8352 uint32_t u32EffAddr;
8353
8354 /* Handle the disp32 form with no registers first. */
8355 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8356 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8357 else
8358 {
8359 /* Get the register (or SIB) value. */
8360 switch ((bRm & X86_MODRM_RM_MASK))
8361 {
8362 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8363 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8364 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8365 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8366 case 4: /* SIB */
8367 {
8368 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8369
8370 /* Get the index and scale it. */
8371 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8372 {
8373 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8374 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8375 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8376 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8377 case 4: u32EffAddr = 0; /*none */ break;
8378 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8379 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8380 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8381 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8382 }
8383 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8384
8385 /* add base */
8386 switch (bSib & X86_SIB_BASE_MASK)
8387 {
8388 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8389 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8390 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8391 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8392 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8393 case 5:
8394 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8395 {
8396 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8397 SET_SS_DEF();
8398 }
8399 else
8400 {
8401 uint32_t u32Disp;
8402 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8403 u32EffAddr += u32Disp;
8404 }
8405 break;
8406 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8407 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8409 }
8410 break;
8411 }
8412 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8413 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8414 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8415 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8416 }
8417
8418 /* Get and add the displacement. */
8419 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8420 {
8421 case 0:
8422 break;
8423 case 1:
8424 {
8425 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8426 u32EffAddr += i8Disp;
8427 break;
8428 }
8429 case 2:
8430 {
8431 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8432 u32EffAddr += u32Disp;
8433 break;
8434 }
8435 default:
8436 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8437 }
8438
8439 }
8440 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8441 *pGCPtrEff = u32EffAddr;
8442 }
8443 }
8444 else
8445 {
8446 uint64_t u64EffAddr;
8447
8448 /* Handle the rip+disp32 form with no registers first. */
8449 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8450 {
8451 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8452 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8453 }
8454 else
8455 {
8456 /* Get the register (or SIB) value. */
8457 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8458 {
8459 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8460 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8461 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8462 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8463 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8464 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8465 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8466 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8467 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8468 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8469 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8470 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8471 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8472 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8473 /* SIB */
8474 case 4:
8475 case 12:
8476 {
8477 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8478
8479 /* Get the index and scale it. */
8480 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8481 {
8482 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8483 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8484 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8485 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8486 case 4: u64EffAddr = 0; /*none */ break;
8487 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8488 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8489 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8490 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8491 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8492 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8493 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8494 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8495 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8496 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8497 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8499 }
8500 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8501
8502 /* add base */
8503 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8504 {
8505 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8506 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8507 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8508 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8509 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8510 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8511 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8512 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8513 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8514 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8515 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8516 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8517 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8518 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8519 /* complicated encodings */
8520 case 5:
8521 case 13:
8522 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8523 {
8524 if (!pVCpu->iem.s.uRexB)
8525 {
8526 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8527 SET_SS_DEF();
8528 }
8529 else
8530 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8531 }
8532 else
8533 {
8534 uint32_t u32Disp;
8535 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8536 u64EffAddr += (int32_t)u32Disp;
8537 }
8538 break;
8539 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8540 }
8541 break;
8542 }
8543 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8544 }
8545
8546 /* Get and add the displacement. */
8547 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8548 {
8549 case 0:
8550 break;
8551 case 1:
8552 {
8553 int8_t i8Disp;
8554 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8555 u64EffAddr += i8Disp;
8556 break;
8557 }
8558 case 2:
8559 {
8560 uint32_t u32Disp;
8561 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8562 u64EffAddr += (int32_t)u32Disp;
8563 break;
8564 }
8565 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8566 }
8567
8568 }
8569
8570 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8571 *pGCPtrEff = u64EffAddr;
8572 else
8573 {
8574 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8575 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8576 }
8577 }
8578
8579 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8580 return VINF_SUCCESS;
8581}
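/*
 * Worked 32-bit example for the decoder above (architectural, for reference):
 * bRm=0x44 gives mod=1 and rm=4, so a SIB byte and a disp8 follow; with
 * SIB=0x98 (scale=4, index=EBX, base=EAX) and disp8=0x10 the code computes
 * *pGCPtrEff = EAX + EBX*4 + 0x10, with DS as the default segment since
 * SET_SS_DEF() only fires for the EBP/ESP based forms.
 */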
8582
8583
8584#ifdef IEM_WITH_SETJMP
8585/**
8586 * Calculates the effective address of a ModR/M memory operand.
8587 *
8588 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8589 *
8590 * May longjmp on internal error.
8591 *
8592 * @return The effective address.
8593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8594 * @param bRm The ModRM byte.
8595 * @param cbImmAndRspOffset - First byte: The size of any immediate
8596 * following the effective address opcode bytes
8597 * (only for RIP relative addressing).
8598 * - Second byte: RSP displacement (for POP [ESP]).
8599 */
8600RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8601{
8602 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8603# define SET_SS_DEF() \
8604 do \
8605 { \
8606 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8607 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8608 } while (0)
8609
8610 if (!IEM_IS_64BIT_CODE(pVCpu))
8611 {
8612/** @todo Check the effective address size crap! */
8613 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8614 {
8615 uint16_t u16EffAddr;
8616
8617 /* Handle the disp16 form with no registers first. */
8618 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8619 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8620 else
8621 {
8622 /* Get the displacement. */
8623 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8624 {
8625 case 0: u16EffAddr = 0; break;
8626 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8627 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8628 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8629 }
8630
8631 /* Add the base and index registers to the disp. */
8632 switch (bRm & X86_MODRM_RM_MASK)
8633 {
8634 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8635 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8636 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8637 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8638 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8639 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8640 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8641 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8642 }
8643 }
8644
8645 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8646 return u16EffAddr;
8647 }
8648
8649 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8650 uint32_t u32EffAddr;
8651
8652 /* Handle the disp32 form with no registers first. */
8653 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8654 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8655 else
8656 {
8657 /* Get the register (or SIB) value. */
8658 switch ((bRm & X86_MODRM_RM_MASK))
8659 {
8660 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8661 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8662 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8663 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8664 case 4: /* SIB */
8665 {
8666 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8667
8668 /* Get the index and scale it. */
8669 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8670 {
8671 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8672 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8673 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8674 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8675 case 4: u32EffAddr = 0; /*none */ break;
8676 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8677 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8678 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8679 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8680 }
8681 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8682
8683 /* add base */
8684 switch (bSib & X86_SIB_BASE_MASK)
8685 {
8686 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8687 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8688 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8689 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8690 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8691 case 5:
8692 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8693 {
8694 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8695 SET_SS_DEF();
8696 }
8697 else
8698 {
8699 uint32_t u32Disp;
8700 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8701 u32EffAddr += u32Disp;
8702 }
8703 break;
8704 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8705 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8706 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8707 }
8708 break;
8709 }
8710 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8711 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8712 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8713 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8714 }
8715
8716 /* Get and add the displacement. */
8717 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8718 {
8719 case 0:
8720 break;
8721 case 1:
8722 {
8723 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8724 u32EffAddr += i8Disp;
8725 break;
8726 }
8727 case 2:
8728 {
8729 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8730 u32EffAddr += u32Disp;
8731 break;
8732 }
8733 default:
8734 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8735 }
8736 }
8737
8738 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8739 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8740 return u32EffAddr;
8741 }
8742
8743 uint64_t u64EffAddr;
8744
8745 /* Handle the rip+disp32 form with no registers first. */
8746 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8747 {
8748 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8749 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8750 }
8751 else
8752 {
8753 /* Get the register (or SIB) value. */
8754 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8755 {
8756 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8757 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8758 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8759 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8760 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8761 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8762 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8763 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8764 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8765 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8766 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8767 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8768 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8769 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8770 /* SIB */
8771 case 4:
8772 case 12:
8773 {
8774 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8775
8776 /* Get the index and scale it. */
8777 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8778 {
8779 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8780 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8781 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8782 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8783 case 4: u64EffAddr = 0; /*none */ break;
8784 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8785 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8786 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8787 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8788 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8789 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8790 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8791 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8792 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8793 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8794 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8795 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8796 }
8797 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8798
8799 /* add base */
8800 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8801 {
8802 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8803 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8804 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8805 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8806 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8807 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8808 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8809 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8810 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8811 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8812 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8813 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8814 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8815 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8816 /* complicated encodings */
8817 case 5:
8818 case 13:
8819 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8820 {
8821 if (!pVCpu->iem.s.uRexB)
8822 {
8823 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8824 SET_SS_DEF();
8825 }
8826 else
8827 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8828 }
8829 else
8830 {
8831 uint32_t u32Disp;
8832 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8833 u64EffAddr += (int32_t)u32Disp;
8834 }
8835 break;
8836 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8837 }
8838 break;
8839 }
8840 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8841 }
8842
8843 /* Get and add the displacement. */
8844 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8845 {
8846 case 0:
8847 break;
8848 case 1:
8849 {
8850 int8_t i8Disp;
8851 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8852 u64EffAddr += i8Disp;
8853 break;
8854 }
8855 case 2:
8856 {
8857 uint32_t u32Disp;
8858 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8859 u64EffAddr += (int32_t)u32Disp;
8860 break;
8861 }
8862 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8863 }
8864
8865 }
8866
8867 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8868 {
8869 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8870 return u64EffAddr;
8871 }
8872 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8873 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8874 return u64EffAddr & UINT32_MAX;
8875}
8876#endif /* IEM_WITH_SETJMP */
8877
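/*
 * Illustrative worked example (not part of the build) of the effective address
 * arithmetic above: EffAddr = base + (index << scale) + displacement.  The
 * encoding below corresponds to a 64-bit ModR/M with mod=01 and r/m=100 (SIB
 * follows) plus an 8-bit displacement; the register values are made up.
 */
#if 0
static void iemExampleSibEffAddr(void)
{
    uint64_t const uRax   = UINT64_C(0x1000);   /* hypothetical RAX (base) */
    uint64_t const uRbx   = UINT64_C(0x20);     /* hypothetical RBX (index) */
    uint8_t  const bSib   = 0x98;               /* scale=2 (x4), index=011b (RBX), base=000b (RAX) */
    int8_t   const i8Disp = 0x10;               /* mod=01: 8-bit displacement follows the SIB byte */

    uint64_t uEffAddr = uRbx;                                               /* index */
    uEffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;       /* x4 */
    uEffAddr  += uRax;                                                      /* base */
    uEffAddr  += i8Disp;                                                    /* displacement */
    Assert(uEffAddr == UINT64_C(0x1090));       /* 0x1000 + 0x20*4 + 0x10 */
}
#endif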
8878
8879/**
8880 * Calculates the effective address of a ModR/M memory operand, extended version
8881 * for use in the recompilers.
8882 *
8883 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8884 *
8885 * @return Strict VBox status code.
8886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8887 * @param bRm The ModRM byte.
8888 * @param cbImmAndRspOffset - First byte: The size of any immediate
8889 * following the effective address opcode bytes
8890 * (only for RIP relative addressing).
8891 * - Second byte: RSP displacement (for POP [ESP]).
8892 * @param pGCPtrEff Where to return the effective address.
8893 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8894 * SIB byte (bits 39:32).
8895 */
8896VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8897{
8898    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8899# define SET_SS_DEF() \
8900 do \
8901 { \
8902 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8903 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8904 } while (0)
8905
8906 uint64_t uInfo;
8907 if (!IEM_IS_64BIT_CODE(pVCpu))
8908 {
8909/** @todo Check the effective address size crap! */
8910 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8911 {
8912 uint16_t u16EffAddr;
8913
8914 /* Handle the disp16 form with no registers first. */
8915 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8916 {
8917 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8918 uInfo = u16EffAddr;
8919 }
8920 else
8921 {
8922                /* Get the displacement. */
8923 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8924 {
8925 case 0: u16EffAddr = 0; break;
8926 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8927 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8928 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8929 }
8930 uInfo = u16EffAddr;
8931
8932 /* Add the base and index registers to the disp. */
8933 switch (bRm & X86_MODRM_RM_MASK)
8934 {
8935 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8936 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8937 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8938 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8939 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8940 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8941 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8942 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8943 }
8944 }
8945
8946 *pGCPtrEff = u16EffAddr;
8947 }
8948 else
8949 {
8950 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8951 uint32_t u32EffAddr;
8952
8953 /* Handle the disp32 form with no registers first. */
8954 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8955 {
8956 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8957 uInfo = u32EffAddr;
8958 }
8959 else
8960 {
8961 /* Get the register (or SIB) value. */
8962 uInfo = 0;
8963 switch ((bRm & X86_MODRM_RM_MASK))
8964 {
8965 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8966 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8967 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8968 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8969 case 4: /* SIB */
8970 {
8971 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8972 uInfo = (uint64_t)bSib << 32;
8973
8974 /* Get the index and scale it. */
8975 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8976 {
8977 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8978 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8979 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8980 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8981 case 4: u32EffAddr = 0; /*none */ break;
8982 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8983 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8984 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8985 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8986 }
8987 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8988
8989 /* add base */
8990 switch (bSib & X86_SIB_BASE_MASK)
8991 {
8992 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8993 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8994 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8995 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8996 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8997 case 5:
8998 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8999 {
9000 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9001 SET_SS_DEF();
9002 }
9003 else
9004 {
9005 uint32_t u32Disp;
9006 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9007 u32EffAddr += u32Disp;
9008 uInfo |= u32Disp;
9009 }
9010 break;
9011 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9012 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9013 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9014 }
9015 break;
9016 }
9017 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9018 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9019 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9020 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9021 }
9022
9023 /* Get and add the displacement. */
9024 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9025 {
9026 case 0:
9027 break;
9028 case 1:
9029 {
9030 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9031 u32EffAddr += i8Disp;
9032 uInfo |= (uint32_t)(int32_t)i8Disp;
9033 break;
9034 }
9035 case 2:
9036 {
9037 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9038 u32EffAddr += u32Disp;
9039 uInfo |= (uint32_t)u32Disp;
9040 break;
9041 }
9042 default:
9043 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9044 }
9045
9046 }
9047 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9048 *pGCPtrEff = u32EffAddr;
9049 }
9050 }
9051 else
9052 {
9053 uint64_t u64EffAddr;
9054
9055 /* Handle the rip+disp32 form with no registers first. */
9056 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9057 {
9058 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9059 uInfo = (uint32_t)u64EffAddr;
9060 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9061 }
9062 else
9063 {
9064 /* Get the register (or SIB) value. */
9065 uInfo = 0;
9066 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9067 {
9068 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9069 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9070 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9071 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9072 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9073 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9074 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9075 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9076 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9077 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9078 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9079 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9080 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9081 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9082 /* SIB */
9083 case 4:
9084 case 12:
9085 {
9086 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9087 uInfo = (uint64_t)bSib << 32;
9088
9089 /* Get the index and scale it. */
9090 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9091 {
9092 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9093 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9094 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9095 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9096 case 4: u64EffAddr = 0; /*none */ break;
9097 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9098 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9099 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9100 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9101 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9102 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9103 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9104 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9105 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9106 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9107 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9108 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9109 }
9110 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9111
9112 /* add base */
9113 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9114 {
9115 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9116 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9117 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9118 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9119 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9120 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9121 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9122 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9123 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9124 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9125 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9126 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9127 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9128 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9129 /* complicated encodings */
9130 case 5:
9131 case 13:
9132 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9133 {
9134 if (!pVCpu->iem.s.uRexB)
9135 {
9136 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9137 SET_SS_DEF();
9138 }
9139 else
9140 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9141 }
9142 else
9143 {
9144 uint32_t u32Disp;
9145 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9146 u64EffAddr += (int32_t)u32Disp;
9147 uInfo |= u32Disp;
9148 }
9149 break;
9150 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9151 }
9152 break;
9153 }
9154 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9155 }
9156
9157 /* Get and add the displacement. */
9158 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9159 {
9160 case 0:
9161 break;
9162 case 1:
9163 {
9164 int8_t i8Disp;
9165 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9166 u64EffAddr += i8Disp;
9167 uInfo |= (uint32_t)(int32_t)i8Disp;
9168 break;
9169 }
9170 case 2:
9171 {
9172 uint32_t u32Disp;
9173 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9174 u64EffAddr += (int32_t)u32Disp;
9175 uInfo |= u32Disp;
9176 break;
9177 }
9178 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9179 }
9180
9181 }
9182
9183 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9184 *pGCPtrEff = u64EffAddr;
9185 else
9186 {
9187 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9188 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9189 }
9190 }
9191 *puInfo = uInfo;
9192
9193 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9194 return VINF_SUCCESS;
9195}
9196
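/*
 * Illustrative sketch (not part of the build): how a hypothetical recompiler
 * caller might pack cbImmAndRspOffset and unpack the extra info returned in
 * *puInfo by iemOpHlpCalcRmEffAddrEx.  Per the doc comment above, the low 32
 * bits of the info hold the displacement and bits 39:32 hold the SIB byte;
 * the first byte of cbImmAndRspOffset is the immediate size and the second
 * byte the RSP displacement.  The function and variable names below are made
 * up for the example.
 */
#if 0
static void iemExampleUnpackEffAddrInfo(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, uint8_t offRsp)
{
    /* First byte: size of any immediate following the effective address bytes; second byte: RSP displacement. */
    uint32_t const cbImmAndRspOffset = (uint32_t)cbImm | ((uint32_t)offRsp << 8);

    RTGCPTR      GCPtrEff;
    uint64_t     uInfo;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;           /* bits 31:0  - displacement */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32);    /* bits 39:32 - SIB byte (0 if none) */
        Log5(("example: GCPtrEff=%RGv disp=%#x sib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
}
#endif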
9197/** @} */
9198
9199
9200#ifdef LOG_ENABLED
9201/**
9202 * Logs the current instruction.
9203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9204 * @param fSameCtx Set if we have the same context information as the VMM,
9205 * clear if we may have already executed an instruction in
9206 * our debug context. When clear, we assume IEMCPU holds
9207 * valid CPU mode info.
9208 *
9209 * The @a fSameCtx parameter is now misleading and obsolete.
9210 * @param pszFunction The IEM function doing the execution.
9211 */
9212static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9213{
9214# ifdef IN_RING3
9215 if (LogIs2Enabled())
9216 {
9217 char szInstr[256];
9218 uint32_t cbInstr = 0;
9219 if (fSameCtx)
9220 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9221 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9222 szInstr, sizeof(szInstr), &cbInstr);
9223 else
9224 {
9225 uint32_t fFlags = 0;
9226 switch (IEM_GET_CPU_MODE(pVCpu))
9227 {
9228 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9229 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9230 case IEMMODE_16BIT:
9231 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9232 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9233 else
9234 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9235 break;
9236 }
9237 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9238 szInstr, sizeof(szInstr), &cbInstr);
9239 }
9240
9241 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9242 Log2(("**** %s fExec=%x\n"
9243 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9244 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9245 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9246 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9247 " %s\n"
9248 , pszFunction, pVCpu->iem.s.fExec,
9249 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9250 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9251 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9252 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9253 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9254 szInstr));
9255
9256 /* This stuff sucks atm. as it fills the log with MSRs. */
9257 //if (LogIs3Enabled())
9258 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9259 }
9260 else
9261# endif
9262 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9263 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9264 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9265}
9266#endif /* LOG_ENABLED */
9267
9268
9269#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9270/**
9271 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9272 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9273 *
9274 * @returns Modified rcStrict.
9275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9276 * @param rcStrict The instruction execution status.
9277 */
9278static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9279{
9280 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9281 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9282 {
9283 /* VMX preemption timer takes priority over NMI-window exits. */
9284 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9285 {
9286 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9287 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9288 }
9289 /*
9290 * Check remaining intercepts.
9291 *
9292 * NMI-window and Interrupt-window VM-exits.
9293 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9294 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9295 *
9296 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9297 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9298 */
9299 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9300 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9301 && !TRPMHasTrap(pVCpu))
9302 {
9303 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9304 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9305 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9306 {
9307 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9308 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9309 }
9310 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9311 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9312 {
9313 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9314 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9315 }
9316 }
9317 }
9318 /* TPR-below threshold/APIC write has the highest priority. */
9319 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9320 {
9321 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9322 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9323 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9324 }
9325 /* MTF takes priority over VMX-preemption timer. */
9326 else
9327 {
9328 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9329 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9330 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9331 }
9332 return rcStrict;
9333}
9334#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9335
9336
9337/**
9338 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9339 * IEMExecOneWithPrefetchedByPC.
9340 *
9341 * Similar code is found in IEMExecLots.
9342 *
9343 * @return Strict VBox status code.
9344 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9345 * @param fExecuteInhibit If set, execute the instruction following CLI,
9346 * POP SS and MOV SS,GR.
9347 * @param pszFunction The calling function name.
9348 */
9349DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9350{
9351 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9352 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9353 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9354 RT_NOREF_PV(pszFunction);
9355
9356#ifdef IEM_WITH_SETJMP
9357 VBOXSTRICTRC rcStrict;
9358 IEM_TRY_SETJMP(pVCpu, rcStrict)
9359 {
9360 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9361 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9362 }
9363 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9364 {
9365 pVCpu->iem.s.cLongJumps++;
9366 }
9367 IEM_CATCH_LONGJMP_END(pVCpu);
9368#else
9369 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9370 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9371#endif
9372 if (rcStrict == VINF_SUCCESS)
9373 pVCpu->iem.s.cInstructions++;
9374 if (pVCpu->iem.s.cActiveMappings > 0)
9375 {
9376 Assert(rcStrict != VINF_SUCCESS);
9377 iemMemRollback(pVCpu);
9378 }
9379 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9380 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9381 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9382
9383//#ifdef DEBUG
9384// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9385//#endif
9386
9387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9388 /*
9389 * Perform any VMX nested-guest instruction boundary actions.
9390 *
9391 * If any of these causes a VM-exit, we must skip executing the next
9392 * instruction (would run into stale page tables). A VM-exit makes sure
9393 * there is no interrupt-inhibition, so that should ensure we don't go
9394 * to try execute the next instruction. Clearing fExecuteInhibit is
9395 * problematic because of the setjmp/longjmp clobbering above.
9396 */
9397 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9398 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9399 || rcStrict != VINF_SUCCESS)
9400 { /* likely */ }
9401 else
9402 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9403#endif
9404
9405 /* Execute the next instruction as well if a cli, pop ss or
9406 mov ss, Gr has just completed successfully. */
9407 if ( fExecuteInhibit
9408 && rcStrict == VINF_SUCCESS
9409 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9410 {
9411 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9412 if (rcStrict == VINF_SUCCESS)
9413 {
9414#ifdef LOG_ENABLED
9415 iemLogCurInstr(pVCpu, false, pszFunction);
9416#endif
9417#ifdef IEM_WITH_SETJMP
9418 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9419 {
9420 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9421 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9422 }
9423 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9424 {
9425 pVCpu->iem.s.cLongJumps++;
9426 }
9427 IEM_CATCH_LONGJMP_END(pVCpu);
9428#else
9429 IEM_OPCODE_GET_FIRST_U8(&b);
9430 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9431#endif
9432 if (rcStrict == VINF_SUCCESS)
9433 {
9434 pVCpu->iem.s.cInstructions++;
9435#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9436 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9437 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9438 { /* likely */ }
9439 else
9440 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9441#endif
9442 }
9443 if (pVCpu->iem.s.cActiveMappings > 0)
9444 {
9445 Assert(rcStrict != VINF_SUCCESS);
9446 iemMemRollback(pVCpu);
9447 }
9448 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9449 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9450 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9451 }
9452 else if (pVCpu->iem.s.cActiveMappings > 0)
9453 iemMemRollback(pVCpu);
9454 /** @todo drop this after we bake this change into RIP advancing. */
9455 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9456 }
9457
9458 /*
9459 * Return value fiddling, statistics and sanity assertions.
9460 */
9461 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9462
9463 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9464 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9465 return rcStrict;
9466}
9467
9468
9469/**
9470 * Execute one instruction.
9471 *
9472 * @return Strict VBox status code.
9473 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9474 */
9475VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9476{
9477    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9478#ifdef LOG_ENABLED
9479 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9480#endif
9481
9482 /*
9483 * Do the decoding and emulation.
9484 */
9485 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9486 if (rcStrict == VINF_SUCCESS)
9487 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9488 else if (pVCpu->iem.s.cActiveMappings > 0)
9489 iemMemRollback(pVCpu);
9490
9491 if (rcStrict != VINF_SUCCESS)
9492 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9493 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9494 return rcStrict;
9495}
9496
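/*
 * Illustrative sketch (not part of the build) of a hypothetical caller driving
 * IEMExecOne in a small single-stepping loop; real callers live in EM/HM and
 * do considerably more status code handling.
 */
#if 0
static VBOXSTRICTRC iemExampleStepGuest(PVMCPUCC pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decodes and executes exactly one instruction */
        if (rcStrict != VINF_SUCCESS)   /* informational statuses (I/O, MMIO, VM-exits, ...)
                                           and errors are left for the caller to handle */
            break;
    }
    return rcStrict;
}
#endif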
9497
9498VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9499{
9500 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9501 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9502 if (rcStrict == VINF_SUCCESS)
9503 {
9504 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9505 if (pcbWritten)
9506 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9507 }
9508 else if (pVCpu->iem.s.cActiveMappings > 0)
9509 iemMemRollback(pVCpu);
9510
9511 return rcStrict;
9512}
9513
9514
9515VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9516 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9517{
9518 VBOXSTRICTRC rcStrict;
9519 if ( cbOpcodeBytes
9520 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9521 {
9522 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9523#ifdef IEM_WITH_CODE_TLB
9524 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9525 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9526 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9527 pVCpu->iem.s.offCurInstrStart = 0;
9528 pVCpu->iem.s.offInstrNextByte = 0;
9529 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9530#else
9531 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9532 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9533#endif
9534 rcStrict = VINF_SUCCESS;
9535 }
9536 else
9537 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9538 if (rcStrict == VINF_SUCCESS)
9539 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9540 else if (pVCpu->iem.s.cActiveMappings > 0)
9541 iemMemRollback(pVCpu);
9542
9543 return rcStrict;
9544}
9545
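/*
 * Illustrative sketch (not part of the build): the prefetched opcode bytes are
 * only used when the guest RIP still equals OpcodeBytesPC; otherwise the call
 * falls back to a normal opcode prefetch.  The buffer parameters here are
 * hypothetical caller-captured values.
 */
#if 0
static VBOXSTRICTRC iemExampleReplayInstr(PVMCPUCC pVCpu, uint64_t GCPtrInstrPC,
                                          uint8_t const *pabSavedOpcodes, size_t cbSaved)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstrPC, pabSavedOpcodes, cbSaved);
}
#endif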
9546
9547VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9548{
9549 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9550 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9551 if (rcStrict == VINF_SUCCESS)
9552 {
9553 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9554 if (pcbWritten)
9555 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9556 }
9557 else if (pVCpu->iem.s.cActiveMappings > 0)
9558 iemMemRollback(pVCpu);
9559
9560 return rcStrict;
9561}
9562
9563
9564VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9565 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9566{
9567 VBOXSTRICTRC rcStrict;
9568 if ( cbOpcodeBytes
9569 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9570 {
9571 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9572#ifdef IEM_WITH_CODE_TLB
9573 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9574 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9575 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9576 pVCpu->iem.s.offCurInstrStart = 0;
9577 pVCpu->iem.s.offInstrNextByte = 0;
9578 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9579#else
9580 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9581 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9582#endif
9583 rcStrict = VINF_SUCCESS;
9584 }
9585 else
9586 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9587 if (rcStrict == VINF_SUCCESS)
9588 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9589 else if (pVCpu->iem.s.cActiveMappings > 0)
9590 iemMemRollback(pVCpu);
9591
9592 return rcStrict;
9593}
9594
9595
9596/**
9597 * For handling split cacheline lock operations when the host has split-lock
9598 * detection enabled.
9599 *
9600 * This will cause the interpreter to disregard the lock prefix and implicit
9601 * locking (xchg).
9602 *
9603 * @returns Strict VBox status code.
9604 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9605 */
9606VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9607{
9608 /*
9609 * Do the decoding and emulation.
9610 */
9611 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9612 if (rcStrict == VINF_SUCCESS)
9613 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9614 else if (pVCpu->iem.s.cActiveMappings > 0)
9615 iemMemRollback(pVCpu);
9616
9617 if (rcStrict != VINF_SUCCESS)
9618 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9619 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9620 return rcStrict;
9621}
9622
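/*
 * Illustrative sketch (not part of the build) of when a caller might pick
 * IEMExecOneIgnoreLock over IEMExecOne: after the host's split-lock detection
 * fired for the current guest instruction, re-run it with the lock prefix and
 * implicit locking disregarded.  The flag below is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleHandleSplitLock(PVMCPUCC pVCpu, bool fHostSplitLockDetected)
{
    if (fHostSplitLockDetected)
        return IEMExecOneIgnoreLock(pVCpu);
    return IEMExecOne(pVCpu);
}
#endif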
9623
9624/**
9625 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9626 * inject a pending TRPM trap.
9627 */
9628VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9629{
9630 Assert(TRPMHasTrap(pVCpu));
9631
9632 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9633 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9634 {
9635 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9636#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9637 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9638 if (fIntrEnabled)
9639 {
9640 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9641 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9642 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9643 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9644 else
9645 {
9646 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9647 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9648 }
9649 }
9650#else
9651 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9652#endif
9653 if (fIntrEnabled)
9654 {
9655 uint8_t u8TrapNo;
9656 TRPMEVENT enmType;
9657 uint32_t uErrCode;
9658 RTGCPTR uCr2;
9659 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9660 AssertRC(rc2);
9661 Assert(enmType == TRPM_HARDWARE_INT);
9662 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9663
9664 TRPMResetTrap(pVCpu);
9665
9666#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9667 /* Injecting an event may cause a VM-exit. */
9668 if ( rcStrict != VINF_SUCCESS
9669 && rcStrict != VINF_IEM_RAISED_XCPT)
9670 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9671#else
9672 NOREF(rcStrict);
9673#endif
9674 }
9675 }
9676
9677 return VINF_SUCCESS;
9678}
9679
9680
9681VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9682{
9683 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9684 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9685 Assert(cMaxInstructions > 0);
9686
9687 /*
9688 * See if there is an interrupt pending in TRPM, inject it if we can.
9689 */
9690 /** @todo What if we are injecting an exception and not an interrupt? Is that
9691 * possible here? For now we assert it is indeed only an interrupt. */
9692 if (!TRPMHasTrap(pVCpu))
9693 { /* likely */ }
9694 else
9695 {
9696 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9697 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9698 { /*likely */ }
9699 else
9700 return rcStrict;
9701 }
9702
9703 /*
9704 * Initial decoder init w/ prefetch, then setup setjmp.
9705 */
9706 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9707 if (rcStrict == VINF_SUCCESS)
9708 {
9709#ifdef IEM_WITH_SETJMP
9710 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9711 IEM_TRY_SETJMP(pVCpu, rcStrict)
9712#endif
9713 {
9714 /*
9715             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions budget.
9716 */
9717 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9718 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9719 for (;;)
9720 {
9721 /*
9722 * Log the state.
9723 */
9724#ifdef LOG_ENABLED
9725 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9726#endif
9727
9728 /*
9729 * Do the decoding and emulation.
9730 */
9731 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9732 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9733#ifdef VBOX_STRICT
9734 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9735#endif
9736 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9737 {
9738 Assert(pVCpu->iem.s.cActiveMappings == 0);
9739 pVCpu->iem.s.cInstructions++;
9740
9741#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9742 /* Perform any VMX nested-guest instruction boundary actions. */
9743 uint64_t fCpu = pVCpu->fLocalForcedActions;
9744 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9745 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9746 { /* likely */ }
9747 else
9748 {
9749 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9750 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9751 fCpu = pVCpu->fLocalForcedActions;
9752 else
9753 {
9754 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9755 break;
9756 }
9757 }
9758#endif
9759 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9760 {
9761#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9762 uint64_t fCpu = pVCpu->fLocalForcedActions;
9763#endif
9764 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9765 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9766 | VMCPU_FF_TLB_FLUSH
9767 | VMCPU_FF_UNHALT );
9768
9769 if (RT_LIKELY( ( !fCpu
9770 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9771 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9772 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9773 {
9774 if (--cMaxInstructionsGccStupidity > 0)
9775 {
9776                                /* Poll timers every now and then according to the caller's specs. */
9777 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9778 || !TMTimerPollBool(pVM, pVCpu))
9779 {
9780 Assert(pVCpu->iem.s.cActiveMappings == 0);
9781 iemReInitDecoder(pVCpu);
9782 continue;
9783 }
9784 }
9785 }
9786 }
9787 Assert(pVCpu->iem.s.cActiveMappings == 0);
9788 }
9789 else if (pVCpu->iem.s.cActiveMappings > 0)
9790 iemMemRollback(pVCpu);
9791 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9792 break;
9793 }
9794 }
9795#ifdef IEM_WITH_SETJMP
9796 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9797 {
9798 if (pVCpu->iem.s.cActiveMappings > 0)
9799 iemMemRollback(pVCpu);
9800# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9801 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9802# endif
9803 pVCpu->iem.s.cLongJumps++;
9804 }
9805 IEM_CATCH_LONGJMP_END(pVCpu);
9806#endif
9807
9808 /*
9809 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9810 */
9811 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9812 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9813 }
9814 else
9815 {
9816 if (pVCpu->iem.s.cActiveMappings > 0)
9817 iemMemRollback(pVCpu);
9818
9819#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9820 /*
9821 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9822 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9823 */
9824 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9825#endif
9826 }
9827
9828 /*
9829 * Maybe re-enter raw-mode and log.
9830 */
9831 if (rcStrict != VINF_SUCCESS)
9832 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9834 if (pcInstructions)
9835 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9836 return rcStrict;
9837}
9838
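/*
 * Illustrative sketch (not part of the build) of the IEMExecLots poll rate
 * contract: cPollRate must be a power of two minus one (see the assertion at
 * the top of the function) because it is used as a mask, so timers get polled
 * roughly every cPollRate + 1 instructions.  The budget values are made up.
 */
#if 0
static VBOXSTRICTRC iemExampleExecLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* 511 => TMTimerPollBool is consulted about every 512 instructions. */
    return IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
}
#endif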
9839
9840/**
9841 * Interface used by EMExecuteExec, does exit statistics and limits.
9842 *
9843 * @returns Strict VBox status code.
9844 * @param pVCpu The cross context virtual CPU structure.
9845 * @param fWillExit To be defined.
9846 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9847 * @param cMaxInstructions Maximum number of instructions to execute.
9848 * @param cMaxInstructionsWithoutExits
9849 * The max number of instructions without exits.
9850 * @param pStats Where to return statistics.
9851 */
9852VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9853 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9854{
9855 NOREF(fWillExit); /** @todo define flexible exit crits */
9856
9857 /*
9858 * Initialize return stats.
9859 */
9860 pStats->cInstructions = 0;
9861 pStats->cExits = 0;
9862 pStats->cMaxExitDistance = 0;
9863 pStats->cReserved = 0;
9864
9865 /*
9866 * Initial decoder init w/ prefetch, then setup setjmp.
9867 */
9868 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9869 if (rcStrict == VINF_SUCCESS)
9870 {
9871#ifdef IEM_WITH_SETJMP
9872 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9873 IEM_TRY_SETJMP(pVCpu, rcStrict)
9874#endif
9875 {
9876#ifdef IN_RING0
9877 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9878#endif
9879 uint32_t cInstructionSinceLastExit = 0;
9880
9881 /*
9882             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions budget.
9883 */
9884 PVM pVM = pVCpu->CTX_SUFF(pVM);
9885 for (;;)
9886 {
9887 /*
9888 * Log the state.
9889 */
9890#ifdef LOG_ENABLED
9891 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9892#endif
9893
9894 /*
9895 * Do the decoding and emulation.
9896 */
9897 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9898
9899 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9900 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9901
9902 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9903 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9904 {
9905 pStats->cExits += 1;
9906 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9907 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9908 cInstructionSinceLastExit = 0;
9909 }
9910
9911 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9912 {
9913 Assert(pVCpu->iem.s.cActiveMappings == 0);
9914 pVCpu->iem.s.cInstructions++;
9915 pStats->cInstructions++;
9916 cInstructionSinceLastExit++;
9917
9918#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9919 /* Perform any VMX nested-guest instruction boundary actions. */
9920 uint64_t fCpu = pVCpu->fLocalForcedActions;
9921 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9922 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9923 { /* likely */ }
9924 else
9925 {
9926 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9927 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9928 fCpu = pVCpu->fLocalForcedActions;
9929 else
9930 {
9931 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9932 break;
9933 }
9934 }
9935#endif
9936 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9937 {
9938#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9939 uint64_t fCpu = pVCpu->fLocalForcedActions;
9940#endif
9941 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9942 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9943 | VMCPU_FF_TLB_FLUSH
9944 | VMCPU_FF_UNHALT );
9945 if (RT_LIKELY( ( ( !fCpu
9946 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9947 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9948 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9949 || pStats->cInstructions < cMinInstructions))
9950 {
9951 if (pStats->cInstructions < cMaxInstructions)
9952 {
9953 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9954 {
9955#ifdef IN_RING0
9956 if ( !fCheckPreemptionPending
9957 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9958#endif
9959 {
9960 Assert(pVCpu->iem.s.cActiveMappings == 0);
9961 iemReInitDecoder(pVCpu);
9962 continue;
9963 }
9964#ifdef IN_RING0
9965 rcStrict = VINF_EM_RAW_INTERRUPT;
9966 break;
9967#endif
9968 }
9969 }
9970 }
9971 Assert(!(fCpu & VMCPU_FF_IEM));
9972 }
9973 Assert(pVCpu->iem.s.cActiveMappings == 0);
9974 }
9975 else if (pVCpu->iem.s.cActiveMappings > 0)
9976 iemMemRollback(pVCpu);
9977 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9978 break;
9979 }
9980 }
9981#ifdef IEM_WITH_SETJMP
9982 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9983 {
9984 if (pVCpu->iem.s.cActiveMappings > 0)
9985 iemMemRollback(pVCpu);
9986 pVCpu->iem.s.cLongJumps++;
9987 }
9988 IEM_CATCH_LONGJMP_END(pVCpu);
9989#endif
9990
9991 /*
9992 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9993 */
9994 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9995 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9996 }
9997 else
9998 {
9999 if (pVCpu->iem.s.cActiveMappings > 0)
10000 iemMemRollback(pVCpu);
10001
10002#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10003 /*
10004 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10005 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10006 */
10007 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10008#endif
10009 }
10010
10011 /*
10012 * Maybe re-enter raw-mode and log.
10013 */
10014 if (rcStrict != VINF_SUCCESS)
10015 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10016 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10017 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10018 return rcStrict;
10019}
10020
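/*
 * Illustrative sketch (not part of the build) of how a caller supplies the
 * instruction limits to IEMExecForExits and reads back the exit statistics.
 * The limit values are made-up examples.
 */
#if 0
static VBOXSTRICTRC iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - not yet defined*/,
                                            32   /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/,
                                            512  /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    Log2(("example: %u instructions, %u exits, max exit distance %u\n",
          Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif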
10021
10022/**
10023 * Injects a trap, fault, abort, software interrupt or external interrupt.
10024 *
10025 * The parameter list matches TRPMQueryTrapAll pretty closely.
10026 *
10027 * @returns Strict VBox status code.
10028 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10029 * @param u8TrapNo The trap number.
10030 * @param enmType What type is it (trap/fault/abort), software
10031 * interrupt or hardware interrupt.
10032 * @param uErrCode The error code if applicable.
10033 * @param uCr2 The CR2 value if applicable.
10034 * @param cbInstr The instruction length (only relevant for
10035 * software interrupts).
10036 */
10037VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10038 uint8_t cbInstr)
10039{
10040 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10041#ifdef DBGFTRACE_ENABLED
10042 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10043 u8TrapNo, enmType, uErrCode, uCr2);
10044#endif
10045
10046 uint32_t fFlags;
10047 switch (enmType)
10048 {
10049 case TRPM_HARDWARE_INT:
10050 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10051 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10052 uErrCode = uCr2 = 0;
10053 break;
10054
10055 case TRPM_SOFTWARE_INT:
10056 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10057 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10058 uErrCode = uCr2 = 0;
10059 break;
10060
10061 case TRPM_TRAP:
10062 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10063 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10064 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10065 if (u8TrapNo == X86_XCPT_PF)
10066 fFlags |= IEM_XCPT_FLAGS_CR2;
10067 switch (u8TrapNo)
10068 {
10069 case X86_XCPT_DF:
10070 case X86_XCPT_TS:
10071 case X86_XCPT_NP:
10072 case X86_XCPT_SS:
10073 case X86_XCPT_PF:
10074 case X86_XCPT_AC:
10075 case X86_XCPT_GP:
10076 fFlags |= IEM_XCPT_FLAGS_ERR;
10077 break;
10078 }
10079 break;
10080
10081 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10082 }
10083
10084 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10085
10086 if (pVCpu->iem.s.cActiveMappings > 0)
10087 iemMemRollback(pVCpu);
10088
10089 return rcStrict;
10090}
10091
10092
10093/**
10094 * Injects the active TRPM event.
10095 *
10096 * @returns Strict VBox status code.
10097 * @param pVCpu The cross context virtual CPU structure.
10098 */
10099VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10100{
10101#ifndef IEM_IMPLEMENTS_TASKSWITCH
10102 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10103#else
10104 uint8_t u8TrapNo;
10105 TRPMEVENT enmType;
10106 uint32_t uErrCode;
10107 RTGCUINTPTR uCr2;
10108 uint8_t cbInstr;
10109 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10110 if (RT_FAILURE(rc))
10111 return rc;
10112
10113 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10114 * ICEBP \#DB injection as a special case. */
10115 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10116#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10117 if (rcStrict == VINF_SVM_VMEXIT)
10118 rcStrict = VINF_SUCCESS;
10119#endif
10120#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10121 if (rcStrict == VINF_VMX_VMEXIT)
10122 rcStrict = VINF_SUCCESS;
10123#endif
10124 /** @todo Are there any other codes that imply the event was successfully
10125 * delivered to the guest? See @bugref{6607}. */
10126 if ( rcStrict == VINF_SUCCESS
10127 || rcStrict == VINF_IEM_RAISED_XCPT)
10128 TRPMResetTrap(pVCpu);
10129
10130 return rcStrict;
10131#endif
10132}
10133
10134
10135VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10136{
10137 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10138 return VERR_NOT_IMPLEMENTED;
10139}
10140
10141
10142VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10143{
10144 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10145 return VERR_NOT_IMPLEMENTED;
10146}
10147
10148
10149/**
10150 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10151 *
10152 * This API ASSUMES that the caller has already verified that the guest code is
10153 * allowed to access the I/O port. (The I/O port is in the DX register in the
10154 * guest state.)
10155 *
10156 * @returns Strict VBox status code.
10157 * @param pVCpu The cross context virtual CPU structure.
10158 * @param cbValue The size of the I/O port access (1, 2, or 4).
10159 * @param enmAddrMode The addressing mode.
10160 * @param fRepPrefix Indicates whether a repeat prefix is used
10161 * (doesn't matter which for this instruction).
10162 * @param cbInstr The instruction length in bytes.
10163 * @param iEffSeg The effective segment address.
10164 * @param fIoChecked Whether the access to the I/O port has been
10165 * checked or not. It's typically checked in the
10166 * HM scenario.
10167 */
10168VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10169 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10170{
10171 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10172 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10173
10174 /*
10175 * State init.
10176 */
10177 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10178
10179 /*
10180 * Switch orgy for getting to the right handler.
10181 */
10182 VBOXSTRICTRC rcStrict;
10183 if (fRepPrefix)
10184 {
10185 switch (enmAddrMode)
10186 {
10187 case IEMMODE_16BIT:
10188 switch (cbValue)
10189 {
10190 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10191 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10192 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10193 default:
10194 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10195 }
10196 break;
10197
10198 case IEMMODE_32BIT:
10199 switch (cbValue)
10200 {
10201 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10202 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10203 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10204 default:
10205 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10206 }
10207 break;
10208
10209 case IEMMODE_64BIT:
10210 switch (cbValue)
10211 {
10212 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10213 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10214 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10215 default:
10216 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10217 }
10218 break;
10219
10220 default:
10221 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10222 }
10223 }
10224 else
10225 {
10226 switch (enmAddrMode)
10227 {
10228 case IEMMODE_16BIT:
10229 switch (cbValue)
10230 {
10231 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10232 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10233 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10234 default:
10235 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10236 }
10237 break;
10238
10239 case IEMMODE_32BIT:
10240 switch (cbValue)
10241 {
10242 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10243 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10244 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10245 default:
10246 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10247 }
10248 break;
10249
10250 case IEMMODE_64BIT:
10251 switch (cbValue)
10252 {
10253 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10254 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10255 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10256 default:
10257 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10258 }
10259 break;
10260
10261 default:
10262 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10263 }
10264 }
10265
10266 if (pVCpu->iem.s.cActiveMappings)
10267 iemMemRollback(pVCpu);
10268
10269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10270}
10271
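/*
 * Illustrative sketch (not part of the build) of how an HM-style caller might
 * forward a 'rep outsb' exit to the API above: byte-sized access, 32-bit
 * addressing, rep prefix, DS as the default effective segment.  The two byte
 * instruction length assumes no further prefixes.
 */
#if 0
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1             /*cbValue: byte-sized port access*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true          /*fRepPrefix*/,
                                2             /*cbInstr: 0xf3 0x6e*/,
                                X86_SREG_DS   /*iEffSeg: default segment for OUTS*/,
                                false         /*fIoChecked: not pre-checked by the caller*/);
}
#endif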
10272
10273/**
10274 * Interface for HM and EM for executing string I/O IN (read) instructions.
10275 *
10276 * This API ASSUMES that the caller has already verified that the guest code is
10277 * allowed to access the I/O port. (The I/O port is in the DX register in the
10278 * guest state.)
10279 *
10280 * @returns Strict VBox status code.
10281 * @param pVCpu The cross context virtual CPU structure.
10282 * @param cbValue The size of the I/O port access (1, 2, or 4).
10283 * @param enmAddrMode The addressing mode.
10284 * @param fRepPrefix Indicates whether a repeat prefix is used
10285 * (doesn't matter which for this instruction).
10286 * @param cbInstr The instruction length in bytes.
10287 * @param fIoChecked Whether the access to the I/O port has been
10288 * checked or not. It's typically checked in the
10289 * HM scenario.
10290 */
10291VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10292 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10293{
10294 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10295
10296 /*
10297 * State init.
10298 */
10299 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10300
10301 /*
10302 * Switch orgy for getting to the right handler.
10303 */
10304 VBOXSTRICTRC rcStrict;
10305 if (fRepPrefix)
10306 {
10307 switch (enmAddrMode)
10308 {
10309 case IEMMODE_16BIT:
10310 switch (cbValue)
10311 {
10312 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10313 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10314 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10315 default:
10316 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10317 }
10318 break;
10319
10320 case IEMMODE_32BIT:
10321 switch (cbValue)
10322 {
10323 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10324 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10325 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10326 default:
10327 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10328 }
10329 break;
10330
10331 case IEMMODE_64BIT:
10332 switch (cbValue)
10333 {
10334 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10335 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10336 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10337 default:
10338 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10339 }
10340 break;
10341
10342 default:
10343 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10344 }
10345 }
10346 else
10347 {
10348 switch (enmAddrMode)
10349 {
10350 case IEMMODE_16BIT:
10351 switch (cbValue)
10352 {
10353 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10354 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10355 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10356 default:
10357 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10358 }
10359 break;
10360
10361 case IEMMODE_32BIT:
10362 switch (cbValue)
10363 {
10364 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10365 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10366 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10367 default:
10368 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10369 }
10370 break;
10371
10372 case IEMMODE_64BIT:
10373 switch (cbValue)
10374 {
10375 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10376 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10377 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10378 default:
10379 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10380 }
10381 break;
10382
10383 default:
10384 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10385 }
10386 }
10387
10388 if ( pVCpu->iem.s.cActiveMappings == 0
10389 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10390 { /* likely */ }
10391 else
10392 {
10393 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10394 iemMemRollback(pVCpu);
10395 }
10396 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10397}
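
/**
 * Usage sketch: roughly how a hardware-assisted execution (HM) exit handler
 * might forward a decoded INS / REP INS exit to the interface above.  The
 * surrounding handler and the local variable names are assumptions for
 * illustration; only the IEMExecStringIoRead signature is taken from this
 * file.
 *
 * @code
 *     // cbValue     - 1, 2 or 4 byte I/O access, from the exit information.
 *     // enmAddrMode - IEMMODE_16BIT / IEMMODE_32BIT / IEMMODE_64BIT address size.
 *     // fRep        - whether a REP prefix was present.
 *     // cbInstr     - instruction length reported by the CPU.
 *     // fIoChecked  - typically true for HM, which has already checked the I/O access.
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode,
 *                                                 fRep, cbInstr, fIoChecked);
 *     return rcStrict; // handed back to the EM loop as-is
 * @endcode
 */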
10398
10399
10400/**
10401 * Interface for rawmode to execute an OUT instruction.
10402 *
10403 * @returns Strict VBox status code.
10404 * @param pVCpu The cross context virtual CPU structure.
10405 * @param cbInstr The instruction length in bytes.
10406 * @param u16Port The port to write to.
10407 * @param fImm Whether the port is specified using an immediate operand or
10408 * using the implicit DX register.
10409 * @param cbReg The register size.
10410 *
10411 * @remarks In ring-0 not all of the state needs to be synced in.
10412 */
10413VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10414{
10415 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10416 Assert(cbReg <= 4 && cbReg != 3);
10417
10418 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10419 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10420 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10421 Assert(!pVCpu->iem.s.cActiveMappings);
10422 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10423}
10424
10425
10426/**
10427 * Interface for rawmode to execute an IN instruction.
10428 *
10429 * @returns Strict VBox status code.
10430 * @param pVCpu The cross context virtual CPU structure.
10431 * @param cbInstr The instruction length in bytes.
10432 * @param u16Port The port to read from.
10433 * @param fImm Whether the port is specified using an immediate operand or
10434 * using the implicit DX register.
10435 * @param cbReg The register size.
10436 */
10437VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10438{
10439 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10440 Assert(cbReg <= 4 && cbReg != 3);
10441
10442 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10443 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10444 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10445 Assert(!pVCpu->iem.s.cActiveMappings);
10446 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10447}
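
/**
 * Sketch of how an I/O port intercept might be forwarded to the two helpers
 * above once the exit has been decoded.  The variable names and the
 * surrounding handler are assumptions for illustration; only the
 * IEMExecDecodedIn / IEMExecDecodedOut signatures are taken from this file.
 *
 * @code
 *     // fWrite - true for OUT, false for IN.
 *     // fImm   - port given as an immediate operand vs. the implicit DX register.
 *     // cbReg  - access size: 1, 2 or 4 bytes (never 3).
 *     VBOXSTRICTRC rcStrict = fWrite
 *                           ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg)
 *                           : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, fImm, cbReg);
 * @endcode
 */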
10448
10449
10450/**
10451 * Interface for HM and EM to write to a CRx register.
10452 *
10453 * @returns Strict VBox status code.
10454 * @param pVCpu The cross context virtual CPU structure.
10455 * @param cbInstr The instruction length in bytes.
10456 * @param iCrReg The control register number (destination).
10457 * @param iGReg The general purpose register number (source).
10458 *
10459 * @remarks In ring-0 not all of the state needs to be synced in.
10460 */
10461VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10462{
10463 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10464 Assert(iCrReg < 16);
10465 Assert(iGReg < 16);
10466
10467 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10468 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10469 Assert(!pVCpu->iem.s.cActiveMappings);
10470 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10471}
10472
10473
10474/**
10475 * Interface for HM and EM to read from a CRx register.
10476 *
10477 * @returns Strict VBox status code.
10478 * @param pVCpu The cross context virtual CPU structure.
10479 * @param cbInstr The instruction length in bytes.
10480 * @param iGReg The general purpose register number (destination).
10481 * @param iCrReg The control register number (source).
10482 *
10483 * @remarks In ring-0 not all of the state needs to be synced in.
10484 */
10485VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10486{
10487 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10488 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10489 | CPUMCTX_EXTRN_APIC_TPR);
10490 Assert(iCrReg < 16);
10491 Assert(iGReg < 16);
10492
10493 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10494 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10495 Assert(!pVCpu->iem.s.cActiveMappings);
10496 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10497}
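
/**
 * Sketch of forwarding an intercepted MOV CRx instruction to the helpers
 * above.  The exit-qualification decoding is an assumption for illustration;
 * iCrReg and iGReg are simply the control and general purpose register
 * numbers the hardware reported for the intercepted instruction.
 *
 * @code
 *     VBOXSTRICTRC rcStrict;
 *     if (fIsWrite)   // guest moves GPR -> CRx
 *         rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
 *     else            // guest moves CRx -> GPR
 *         rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
 * @endcode
 *
 * Note the argument order: the destination register comes first in both
 * cases, matching the @a iCrReg / @a iGReg documentation of the two
 * interfaces.
 */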
10498
10499
10500/**
10501 * Interface for HM and EM to write to a DRx register.
10502 *
10503 * @returns Strict VBox status code.
10504 * @param pVCpu The cross context virtual CPU structure.
10505 * @param cbInstr The instruction length in bytes.
10506 * @param iDrReg The debug register number (destination).
10507 * @param iGReg The general purpose register number (source).
10508 *
10509 * @remarks In ring-0 not all of the state needs to be synced in.
10510 */
10511VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10512{
10513 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10514 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10515 Assert(iDrReg < 8);
10516 Assert(iGReg < 16);
10517
10518 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10519 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10520 Assert(!pVCpu->iem.s.cActiveMappings);
10521 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10522}
10523
10524
10525/**
10526 * Interface for HM and EM to read from a DRx register.
10527 *
10528 * @returns Strict VBox status code.
10529 * @param pVCpu The cross context virtual CPU structure.
10530 * @param cbInstr The instruction length in bytes.
10531 * @param iGReg The general purpose register number (destination).
10532 * @param iDrReg The debug register number (source).
10533 *
10534 * @remarks In ring-0 not all of the state needs to be synced in.
10535 */
10536VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10537{
10538 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10539 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10540 Assert(iDrReg < 8);
10541 Assert(iGReg < 16);
10542
10543 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10544 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10545 Assert(!pVCpu->iem.s.cActiveMappings);
10546 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10547}
10548
10549
10550/**
10551 * Interface for HM and EM to clear the CR0[TS] bit.
10552 *
10553 * @returns Strict VBox status code.
10554 * @param pVCpu The cross context virtual CPU structure.
10555 * @param cbInstr The instruction length in bytes.
10556 *
10557 * @remarks In ring-0 not all of the state needs to be synced in.
10558 */
10559VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10560{
10561 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10562
10563 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10564 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10565 Assert(!pVCpu->iem.s.cActiveMappings);
10566 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10567}
10568
10569
10570/**
10571 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10572 *
10573 * @returns Strict VBox status code.
10574 * @param pVCpu The cross context virtual CPU structure.
10575 * @param cbInstr The instruction length in bytes.
10576 * @param uValue The value to load into CR0.
10577 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10578 * memory operand. Otherwise pass NIL_RTGCPTR.
10579 *
10580 * @remarks In ring-0 not all of the state needs to be synced in.
10581 */
10582VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10583{
10584 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10585
10586 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10588 Assert(!pVCpu->iem.s.cActiveMappings);
10589 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10590}
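
/**
 * Sketch of calling the LMSW helper above for the register form of the
 * instruction.  Per the @a GCPtrEffDst documentation, NIL_RTGCPTR is passed
 * when there is no memory operand; the memory form would pass the decoded
 * effective address instead.  The surrounding handler and uNewMsw are
 * assumptions for illustration.
 *
 * @code
 *     // Register form, e.g. "lmsw ax": no memory operand involved.
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uNewMsw, NIL_RTGCPTR);
 * @endcode
 */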
10591
10592
10593/**
10594 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10595 *
10596 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10597 *
10598 * @returns Strict VBox status code.
10599 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10600 * @param cbInstr The instruction length in bytes.
10601 * @remarks In ring-0 not all of the state needs to be synced in.
10602 * @thread EMT(pVCpu)
10603 */
10604VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10605{
10606 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10607
10608 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10609 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10610 Assert(!pVCpu->iem.s.cActiveMappings);
10611 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10612}
10613
10614
10615/**
10616 * Interface for HM and EM to emulate the WBINVD instruction.
10617 *
10618 * @returns Strict VBox status code.
10619 * @param pVCpu The cross context virtual CPU structure.
10620 * @param cbInstr The instruction length in bytes.
10621 *
10622 * @remarks In ring-0 not all of the state needs to be synced in.
10623 */
10624VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10625{
10626 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10627
10628 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10630 Assert(!pVCpu->iem.s.cActiveMappings);
10631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10632}
10633
10634
10635/**
10636 * Interface for HM and EM to emulate the INVD instruction.
10637 *
10638 * @returns Strict VBox status code.
10639 * @param pVCpu The cross context virtual CPU structure.
10640 * @param cbInstr The instruction length in bytes.
10641 *
10642 * @remarks In ring-0 not all of the state needs to be synced in.
10643 */
10644VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10645{
10646 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10647
10648 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10649 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10650 Assert(!pVCpu->iem.s.cActiveMappings);
10651 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10652}
10653
10654
10655/**
10656 * Interface for HM and EM to emulate the INVLPG instruction.
10657 *
10658 * @returns Strict VBox status code.
10659 * @retval VINF_PGM_SYNC_CR3
10660 *
10661 * @param pVCpu The cross context virtual CPU structure.
10662 * @param cbInstr The instruction length in bytes.
10663 * @param GCPtrPage The effective address of the page to invalidate.
10664 *
10665 * @remarks In ring-0 not all of the state needs to be synced in.
10666 */
10667VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10668{
10669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10670
10671 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10673 Assert(!pVCpu->iem.s.cActiveMappings);
10674 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10675}
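
/**
 * Sketch of forwarding an INVLPG intercept and handling the documented
 * VINF_PGM_SYNC_CR3 return.  How a caller reacts to VINF_PGM_SYNC_CR3 is an
 * assumption here; the idea is only that it is an informational status the
 * caller may treat as success once the pending sync has been noted.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *     if (rcStrict == VINF_PGM_SYNC_CR3)
 *     {
 *         // A shadow page table sync is pending; typically folded into
 *         // VINF_SUCCESS and handled via the usual force-flag processing.
 *         rcStrict = VINF_SUCCESS;
 *     }
 * @endcode
 */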
10676
10677
10678/**
10679 * Interface for HM and EM to emulate the INVPCID instruction.
10680 *
10681 * @returns Strict VBox status code.
10682 * @retval VINF_PGM_SYNC_CR3
10683 *
10684 * @param pVCpu The cross context virtual CPU structure.
10685 * @param cbInstr The instruction length in bytes.
10686 * @param iEffSeg The effective segment register.
10687 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10688 * @param uType The invalidation type.
10689 *
10690 * @remarks In ring-0 not all of the state needs to be synced in.
10691 */
10692VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10693 uint64_t uType)
10694{
10695 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10696
10697 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10698 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10699 Assert(!pVCpu->iem.s.cActiveMappings);
10700 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10701}
10702
10703
10704/**
10705 * Interface for HM and EM to emulate the CPUID instruction.
10706 *
10707 * @returns Strict VBox status code.
10708 *
10709 * @param pVCpu The cross context virtual CPU structure.
10710 * @param cbInstr The instruction length in bytes.
10711 *
10712 * @remarks Not all of the state needs to be synced in: the usual set plus RAX and RCX.
10713 */
10714VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10715{
10716 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10717 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10718
10719 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10720 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10721 Assert(!pVCpu->iem.s.cActiveMappings);
10722 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10723}
10724
10725
10726/**
10727 * Interface for HM and EM to emulate the RDPMC instruction.
10728 *
10729 * @returns Strict VBox status code.
10730 *
10731 * @param pVCpu The cross context virtual CPU structure.
10732 * @param cbInstr The instruction length in bytes.
10733 *
10734 * @remarks Not all of the state needs to be synced in.
10735 */
10736VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10737{
10738 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10739 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10740
10741 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10742 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10743 Assert(!pVCpu->iem.s.cActiveMappings);
10744 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10745}
10746
10747
10748/**
10749 * Interface for HM and EM to emulate the RDTSC instruction.
10750 *
10751 * @returns Strict VBox status code.
10752 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10753 *
10754 * @param pVCpu The cross context virtual CPU structure.
10755 * @param cbInstr The instruction length in bytes.
10756 *
10757 * @remarks Not all of the state needs to be synced in.
10758 */
10759VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10760{
10761 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10762 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10763
10764 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10765 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10766 Assert(!pVCpu->iem.s.cActiveMappings);
10767 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10768}
10769
10770
10771/**
10772 * Interface for HM and EM to emulate the RDTSCP instruction.
10773 *
10774 * @returns Strict VBox status code.
10775 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10776 *
10777 * @param pVCpu The cross context virtual CPU structure.
10778 * @param cbInstr The instruction length in bytes.
10779 *
10780 * @remarks Not all of the state needs to be synced in. It is recommended
10781 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10782 */
10783VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10784{
10785 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10786 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10787
10788 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10789 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10790 Assert(!pVCpu->iem.s.cActiveMappings);
10791 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10792}
10793
10794
10795/**
10796 * Interface for HM and EM to emulate the RDMSR instruction.
10797 *
10798 * @returns Strict VBox status code.
10799 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10800 *
10801 * @param pVCpu The cross context virtual CPU structure.
10802 * @param cbInstr The instruction length in bytes.
10803 *
10804 * @remarks Not all of the state needs to be synced in. Requires RCX and
10805 * (currently) all MSRs.
10806 */
10807VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10808{
10809 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10810 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10811
10812 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10813 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10814 Assert(!pVCpu->iem.s.cActiveMappings);
10815 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10816}
10817
10818
10819/**
10820 * Interface for HM and EM to emulate the WRMSR instruction.
10821 *
10822 * @returns Strict VBox status code.
10823 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10824 *
10825 * @param pVCpu The cross context virtual CPU structure.
10826 * @param cbInstr The instruction length in bytes.
10827 *
10828 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10829 * and (currently) all MSRs.
10830 */
10831VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10832{
10833 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10834 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10835 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10836
10837 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10838 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10839 Assert(!pVCpu->iem.s.cActiveMappings);
10840 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10841}
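
/**
 * Sketch of forwarding RDMSR/WRMSR intercepts.  The @remarks above state that
 * RCX (and for WRMSR also RAX and RDX) plus, currently, all MSRs must be
 * available in the guest context, so a ring-0 caller imports those bits
 * before calling; the exact import helper is VT-x/AMD-V specific and is not
 * shown here.  The fWrite flag is an assumption for illustration.
 *
 * @code
 *     // Ring-0 callers first make sure the bits the helpers assert on are
 *     // present in the guest context:
 *     //       IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX
 *     //     | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS
 *
 *     VBOXSTRICTRC rcStrict = fWrite
 *                           ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
 *                           : IEMExecDecodedRdmsr(pVCpu, cbInstr);
 * @endcode
 */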
10842
10843
10844/**
10845 * Interface for HM and EM to emulate the MONITOR instruction.
10846 *
10847 * @returns Strict VBox status code.
10848 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10849 *
10850 * @param pVCpu The cross context virtual CPU structure.
10851 * @param cbInstr The instruction length in bytes.
10852 *
10853 * @remarks Not all of the state needs to be synced in.
10854 * @remarks ASSUMES the default DS segment and that no segment override
10855 * prefixes are used.
10856 */
10857VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10858{
10859 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10860 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10861
10862 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10863 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10864 Assert(!pVCpu->iem.s.cActiveMappings);
10865 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10866}
10867
10868
10869/**
10870 * Interface for HM and EM to emulate the MWAIT instruction.
10871 *
10872 * @returns Strict VBox status code.
10873 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10874 *
10875 * @param pVCpu The cross context virtual CPU structure.
10876 * @param cbInstr The instruction length in bytes.
10877 *
10878 * @remarks Not all of the state needs to be synced in.
10879 */
10880VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10881{
10882 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10883 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10884
10885 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10886 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10887 Assert(!pVCpu->iem.s.cActiveMappings);
10888 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10889}
10890
10891
10892/**
10893 * Interface for HM and EM to emulate the HLT instruction.
10894 *
10895 * @returns Strict VBox status code.
10896 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10897 *
10898 * @param pVCpu The cross context virtual CPU structure.
10899 * @param cbInstr The instruction length in bytes.
10900 *
10901 * @remarks Not all of the state needs to be synced in.
10902 */
10903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10904{
10905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10906
10907 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10908 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10909 Assert(!pVCpu->iem.s.cActiveMappings);
10910 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10911}
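
/**
 * Sketch of dispatching MONITOR/MWAIT/HLT intercepts to the helpers above.
 * The dispatch structure and the kIntercept_* enum values are assumptions for
 * illustration; the point is only that these helpers return strict status
 * codes (halt/reschedule requests and the like) which the caller hands back
 * to EM rather than acting on directly.
 *
 * @code
 *     VBOXSTRICTRC rcStrict;
 *     switch (enmIntercept)
 *     {
 *         case kIntercept_Monitor: rcStrict = IEMExecDecodedMonitor(pVCpu, cbInstr); break;
 *         case kIntercept_Mwait:   rcStrict = IEMExecDecodedMwait(pVCpu, cbInstr);   break;
 *         case kIntercept_Hlt:     rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);     break;
 *         default:
 *             AssertFailed();
 *             rcStrict = VERR_IPE_NOT_REACHED_DEFAULT_CASE;
 *             break;
 *     }
 *     return rcStrict; // propagated to the EM loop
 * @endcode
 */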
10912
10913
10914/**
10915 * Checks if IEM is in the process of delivering an event (interrupt or
10916 * exception).
10917 *
10918 * @returns true if we're in the process of raising an interrupt or exception,
10919 * false otherwise.
10920 * @param pVCpu The cross context virtual CPU structure.
10921 * @param puVector Where to store the vector associated with the
10922 * currently delivered event, optional.
10923 * @param pfFlags Where to store the event delivery flags (see
10924 * IEM_XCPT_FLAGS_XXX), optional.
10925 * @param puErr Where to store the error code associated with the
10926 * event, optional.
10927 * @param puCr2 Where to store the CR2 associated with the event,
10928 * optional.
10929 * @remarks The caller should check the flags to determine if the error code and
10930 * CR2 are valid for the event.
10931 */
10932VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10933{
10934 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10935 if (fRaisingXcpt)
10936 {
10937 if (puVector)
10938 *puVector = pVCpu->iem.s.uCurXcpt;
10939 if (pfFlags)
10940 *pfFlags = pVCpu->iem.s.fCurXcpt;
10941 if (puErr)
10942 *puErr = pVCpu->iem.s.uCurXcptErr;
10943 if (puCr2)
10944 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10945 }
10946 return fRaisingXcpt;
10947}
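
/**
 * Sketch of querying IEM for an event it is in the middle of delivering,
 * e.g. when the caller needs to record pending-event state before a world
 * switch.  How the result is consumed is an assumption; the call itself
 * matches the interface above, and any output pointer may be NULL when the
 * caller is not interested in that piece of information.
 *
 * @code
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         // Check fFlags (IEM_XCPT_FLAGS_XXX) before trusting uErr / uCr2.
 *         Log(("Currently delivering vector %#x, flags %#x\n", uVector, fFlags));
 *     }
 * @endcode
 */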
10948
10949#ifdef IN_RING3
10950
10951/**
10952 * Handles the unlikely and probably fatal merge cases.
10953 *
10954 * @returns Merged status code.
10955 * @param rcStrict Current EM status code.
10956 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10957 * with @a rcStrict.
10958 * @param iMemMap The memory mapping index. For error reporting only.
10959 * @param pVCpu The cross context virtual CPU structure of the calling
10960 * thread, for error reporting only.
10961 */
10962DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10963 unsigned iMemMap, PVMCPUCC pVCpu)
10964{
10965 if (RT_FAILURE_NP(rcStrict))
10966 return rcStrict;
10967
10968 if (RT_FAILURE_NP(rcStrictCommit))
10969 return rcStrictCommit;
10970
10971 if (rcStrict == rcStrictCommit)
10972 return rcStrictCommit;
10973
10974 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10975 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10976 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10977 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10978 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10979 return VERR_IOM_FF_STATUS_IPE;
10980}
10981
10982
10983/**
10984 * Helper for IOMR3ProcessForceFlag.
10985 *
10986 * @returns Merged status code.
10987 * @param rcStrict Current EM status code.
10988 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10989 * with @a rcStrict.
10990 * @param iMemMap The memory mapping index. For error reporting only.
10991 * @param pVCpu The cross context virtual CPU structure of the calling
10992 * thread, for error reporting only.
10993 */
10994DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10995{
10996 /* Simple. */
10997 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10998 return rcStrictCommit;
10999
11000 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11001 return rcStrict;
11002
11003 /* EM scheduling status codes. */
11004 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11005 && rcStrict <= VINF_EM_LAST))
11006 {
11007 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11008 && rcStrictCommit <= VINF_EM_LAST))
11009 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11010 }
11011
11012 /* Unlikely */
11013 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11014}
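
/**
 * A brief worked example of the merge rules above (illustrative only,
 * restating the function body):
 *
 * @code
 *     // A neutral incoming status lets the commit status win:
 *     iemR3MergeStatus(VINF_SUCCESS,      rcStrictCommit, iMemMap, pVCpu); // -> rcStrictCommit
 *     iemR3MergeStatus(VINF_EM_RAW_TO_R3, rcStrictCommit, iMemMap, pVCpu); // -> rcStrictCommit
 *     // For any other incoming status, a successful commit keeps it:
 *     iemR3MergeStatus(rcStrict, VINF_SUCCESS, iMemMap, pVCpu);            // -> rcStrict
 *     // Two EM scheduling codes: the numerically lower one is kept.
 *     // Everything else is handed to iemR3MergeStatusSlow above.
 * @endcode
 */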
11015
11016
11017/**
11018 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11019 *
11020 * @returns Merge between @a rcStrict and what the commit operation returned.
11021 * @param pVM The cross context VM structure.
11022 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11023 * @param rcStrict The status code returned by ring-0 or raw-mode.
11024 */
11025VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11026{
11027 /*
11028 * Reset the pending commit.
11029 */
11030 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11031 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11032 ("%#x %#x %#x\n",
11033 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11034 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11035
11036 /*
11037 * Commit the pending bounce buffers (usually just one).
11038 */
11039 unsigned cBufs = 0;
11040 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11041 while (iMemMap-- > 0)
11042 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11043 {
11044 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11045 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11046 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11047
11048 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11049 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11050 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11051
11052 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11053 {
11054 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11055 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11056 pbBuf,
11057 cbFirst,
11058 PGMACCESSORIGIN_IEM);
11059 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11060 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11061 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11062 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11063 }
11064
11065 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11066 {
11067 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11068 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11069 pbBuf + cbFirst,
11070 cbSecond,
11071 PGMACCESSORIGIN_IEM);
11072 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11073 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11074 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11075 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11076 }
11077 cBufs++;
11078 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11079 }
11080
11081 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11082 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11083 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11084 pVCpu->iem.s.cActiveMappings = 0;
11085 return rcStrict;
11086}
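
/**
 * Sketch of how ring-3 force-flag processing might invoke the routine above.
 * The surrounding code is an assumption for illustration; the contract is
 * simply that it is called on the EMT when VMCPU_FF_IEM is set, with the
 * status code ring-0 or raw-mode returned, and that the merged status is
 * used from then on.
 *
 * @code
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */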
11087
11088#endif /* IN_RING3 */
11089