VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 105223

Last change on this file since 105223 was 105177, checked in by vboxsync, 7 months ago

VMM/IEM: Increase TLB size to 8192 on arm; quick fix for 2M/4M page problem with invlpg. On AMD64 we stick to 256 TLB entries for VM struct size reasons. bugref:10687

1/* $Id: IEMAll.cpp 105177 2024-07-08 09:29:14Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
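/*
 * A quick illustration of the level conventions above, using the ordinary
 * VBox/log.h macros. This is only a sketch: the messages and the locals
 * (uCs, uRip, GCPtr) are hypothetical, not taken from the code below.
 */
#if 0
    LogFlow(("IEMExecOne: enter\n"));                               /* flow    : basic enter/exit info */
    Log(("iemRaiseXcptOrInt: #GP(0) at %04x:%RX64\n", uCs, uRip));  /* level 1 : exceptions and other major events */
    Log4(("decode: %04x:%RX64 mov eax, ebx\n", uCs, uRip));         /* level 4 : decoded mnemonics w/ EIP */
    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));           /* level 10: TLB activity */
#endif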
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
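/*
 * A minimal sketch (not built) of how exception vectors are typically expected
 * to classify, following the double fault rules in the Intel/AMD manuals. The
 * helper name is hypothetical; the real classification is done by the
 * exception raising code, not here.
 */
#if 0
static IEMXCPTCLASS iemXcptClassifySketch(uint8_t uVector)
{
    switch (uVector)
    {
        case X86_XCPT_DE: case X86_XCPT_TS: case X86_XCPT_NP:
        case X86_XCPT_SS: case X86_XCPT_GP:
            return IEMXCPTCLASS_CONTRIBUTORY;
        case X86_XCPT_PF:
            return IEMXCPTCLASS_PAGE_FAULT;
        case X86_XCPT_DF:
            return IEMXCPTCLASS_DOUBLE_FAULT;
        default:
            return IEMXCPTCLASS_BENIGN;
    }
}
#endif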
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB entries for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
225 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
226 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
227 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
228 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
229 } while (0)
230#else
231# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
232#endif
233
234 /*
235 * Process guest breakpoints.
236 */
237#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
238 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
239 { \
240 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
241 { \
242 case X86_DR7_RW_EO: \
243 fExec |= IEM_F_PENDING_BRK_INSTR; \
244 break; \
245 case X86_DR7_RW_WO: \
246 case X86_DR7_RW_RW: \
247 fExec |= IEM_F_PENDING_BRK_DATA; \
248 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
249 break; \
250 case X86_DR7_RW_IO: \
251 fExec |= IEM_F_PENDING_BRK_X86_IO; \
252 break; \
253 } \
254 } \
255 } while (0)
256
257 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
258 if (fGstDr7 & X86_DR7_ENABLED_MASK)
259 {
260/** @todo extract more details here to simplify matching later. */
261#ifdef IEM_WITH_DATA_TLB
262 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
263#endif
264 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
265 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
266 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
267 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
268 }
269
270 /*
271 * Process hypervisor breakpoints.
272 */
273 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
274 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
275 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
276 {
277/** @todo extract more details here to simplify matching later. */
278 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
279 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
282 }
283
284 return fExec;
285}
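/*
 * For reference, a minimal sketch of what PROCESS_ONE_BP() extracts per
 * breakpoint using the iprt/x86.h helpers; fDr7 and iBp are hypothetical
 * locals, not part of the function above.
 */
#if 0
    bool const     fEnabled = RT_BOOL(fDr7 & X86_DR7_L_G(iBp)); /* L<n> or G<n> set -> breakpoint armed */
    unsigned const uRwType  = X86_DR7_GET_RW(fDr7, iBp);        /* X86_DR7_RW_EO / _WO / _IO / _RW */
#endif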
286
287
288/**
289 * Initializes the decoder state.
290 *
291 * iemReInitDecoder is mostly a copy of this function.
292 *
293 * @param pVCpu The cross context virtual CPU structure of the
294 * calling thread.
295 * @param fExecOpts Optional execution flags:
296 * - IEM_F_BYPASS_HANDLERS
297 * - IEM_F_X86_DISREGARD_LOCK
298 */
299DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
300{
301 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
302 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
311
312 /* Execution state: */
313 uint32_t fExec;
314 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
315
316 /* Decoder state: */
317 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
318 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
319 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
320 {
321 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
322 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
323 }
324 else
325 {
326 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
327 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
328 }
329 pVCpu->iem.s.fPrefixes = 0;
330 pVCpu->iem.s.uRexReg = 0;
331 pVCpu->iem.s.uRexB = 0;
332 pVCpu->iem.s.uRexIndex = 0;
333 pVCpu->iem.s.idxPrefix = 0;
334 pVCpu->iem.s.uVex3rdReg = 0;
335 pVCpu->iem.s.uVexLength = 0;
336 pVCpu->iem.s.fEvexStuff = 0;
337 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
338#ifdef IEM_WITH_CODE_TLB
339 pVCpu->iem.s.pbInstrBuf = NULL;
340 pVCpu->iem.s.offInstrNextByte = 0;
341 pVCpu->iem.s.offCurInstrStart = 0;
342# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
343 pVCpu->iem.s.offOpcode = 0;
344# endif
345# ifdef VBOX_STRICT
346 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
347 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
348 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
349 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
350# endif
351#else
352 pVCpu->iem.s.offOpcode = 0;
353 pVCpu->iem.s.cbOpcode = 0;
354#endif
355 pVCpu->iem.s.offModRm = 0;
356 pVCpu->iem.s.cActiveMappings = 0;
357 pVCpu->iem.s.iNextMapping = 0;
358 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
359
360#ifdef DBGFTRACE_ENABLED
361 switch (IEM_GET_CPU_MODE(pVCpu))
362 {
363 case IEMMODE_64BIT:
364 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
365 break;
366 case IEMMODE_32BIT:
367 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
368 break;
369 case IEMMODE_16BIT:
370 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
371 break;
372 }
373#endif
374}
375
376
377/**
378 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
379 *
380 * This is mostly a copy of iemInitDecoder.
381 *
382 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
383 */
384DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
385{
386 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
387 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
395
396 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
397 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
398 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
399
400 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
401 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
402 pVCpu->iem.s.enmEffAddrMode = enmMode;
403 if (enmMode != IEMMODE_64BIT)
404 {
405 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
406 pVCpu->iem.s.enmEffOpSize = enmMode;
407 }
408 else
409 {
410 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
411 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
412 }
413 pVCpu->iem.s.fPrefixes = 0;
414 pVCpu->iem.s.uRexReg = 0;
415 pVCpu->iem.s.uRexB = 0;
416 pVCpu->iem.s.uRexIndex = 0;
417 pVCpu->iem.s.idxPrefix = 0;
418 pVCpu->iem.s.uVex3rdReg = 0;
419 pVCpu->iem.s.uVexLength = 0;
420 pVCpu->iem.s.fEvexStuff = 0;
421 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
422#ifdef IEM_WITH_CODE_TLB
423 if (pVCpu->iem.s.pbInstrBuf)
424 {
425 uint64_t off = (enmMode == IEMMODE_64BIT
426 ? pVCpu->cpum.GstCtx.rip
427 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
428 - pVCpu->iem.s.uInstrBufPc;
429 if (off < pVCpu->iem.s.cbInstrBufTotal)
430 {
431 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
432 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
433 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
434 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
435 else
436 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
437 }
438 else
439 {
440 pVCpu->iem.s.pbInstrBuf = NULL;
441 pVCpu->iem.s.offInstrNextByte = 0;
442 pVCpu->iem.s.offCurInstrStart = 0;
443 pVCpu->iem.s.cbInstrBuf = 0;
444 pVCpu->iem.s.cbInstrBufTotal = 0;
445 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
446 }
447 }
448 else
449 {
450 pVCpu->iem.s.offInstrNextByte = 0;
451 pVCpu->iem.s.offCurInstrStart = 0;
452 pVCpu->iem.s.cbInstrBuf = 0;
453 pVCpu->iem.s.cbInstrBufTotal = 0;
454# ifdef VBOX_STRICT
455 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
456# endif
457 }
458# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
459 pVCpu->iem.s.offOpcode = 0;
460# endif
461#else /* !IEM_WITH_CODE_TLB */
462 pVCpu->iem.s.cbOpcode = 0;
463 pVCpu->iem.s.offOpcode = 0;
464#endif /* !IEM_WITH_CODE_TLB */
465 pVCpu->iem.s.offModRm = 0;
466 Assert(pVCpu->iem.s.cActiveMappings == 0);
467 pVCpu->iem.s.iNextMapping = 0;
468 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
469 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
470
471#ifdef DBGFTRACE_ENABLED
472 switch (enmMode)
473 {
474 case IEMMODE_64BIT:
475 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
476 break;
477 case IEMMODE_32BIT:
478 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
479 break;
480 case IEMMODE_16BIT:
481 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
482 break;
483 }
484#endif
485}
486
487
488
489/**
490 * Prefetches opcodes the first time when starting to execute.
491 *
492 * @returns Strict VBox status code.
493 * @param pVCpu The cross context virtual CPU structure of the
494 * calling thread.
495 * @param fExecOpts Optional execution flags:
496 * - IEM_F_BYPASS_HANDLERS
497 * - IEM_F_X86_DISREGARD_LOCK
498 */
499static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
500{
501 iemInitDecoder(pVCpu, fExecOpts);
502
503#ifndef IEM_WITH_CODE_TLB
504 /*
505 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
506 *
507 * First translate CS:rIP to a physical address.
508 *
509 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
510 * all relevant bytes from the first page, as it ASSUMES it's only ever
511 * called for dealing with CS.LIM, page crossing and instructions that
512 * are too long.
513 */
514 uint32_t cbToTryRead;
515 RTGCPTR GCPtrPC;
516 if (IEM_IS_64BIT_CODE(pVCpu))
517 {
518 cbToTryRead = GUEST_PAGE_SIZE;
519 GCPtrPC = pVCpu->cpum.GstCtx.rip;
520 if (IEM_IS_CANONICAL(GCPtrPC))
521 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
522 else
523 return iemRaiseGeneralProtectionFault0(pVCpu);
524 }
525 else
526 {
527 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
528 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
529 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
530 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
531 else
532 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
533 if (cbToTryRead) { /* likely */ }
534 else /* overflowed */
535 {
536 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
537 cbToTryRead = UINT32_MAX;
538 }
539 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
540 Assert(GCPtrPC <= UINT32_MAX);
541 }
542
543 PGMPTWALKFAST WalkFast;
544 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
545 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
546 &WalkFast);
547 if (RT_SUCCESS(rc))
548 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
549 else
550 {
551 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
552# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
553/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
554 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
555 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
556 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
557# endif
558 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
559 }
560#if 0
561 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
562 else
563 {
564 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
565# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
566/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
567# error completely wrong
568 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
569 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
570# endif
571 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
572 }
573 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
574 else
575 {
576 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
577# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
578/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
579# error completely wrong.
580 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
581 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
582# endif
583 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
584 }
585#else
586 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
587 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
588#endif
589 RTGCPHYS const GCPhys = WalkFast.GCPhys;
590
591 /*
592 * Read the bytes at this address.
593 */
594 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
595 if (cbToTryRead > cbLeftOnPage)
596 cbToTryRead = cbLeftOnPage;
597 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
598 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
599
600 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
601 {
602 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
603 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
604 { /* likely */ }
605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
606 {
607 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
608 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
609 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
610 }
611 else
612 {
613 Log((RT_SUCCESS(rcStrict)
614 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
615 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
616 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
617 return rcStrict;
618 }
619 }
620 else
621 {
622 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
623 if (RT_SUCCESS(rc))
624 { /* likely */ }
625 else
626 {
627 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
628 GCPtrPC, GCPhys, cbToTryRead, rc));
629 return rc;
630 }
631 }
632 pVCpu->iem.s.cbOpcode = cbToTryRead;
633#endif /* !IEM_WITH_CODE_TLB */
634 return VINF_SUCCESS;
635}
636
637
638#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
639/**
640 * Worker for iemTlbInvalidateAll.
641 */
642template<bool a_fGlobal>
643DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
644{
645 if (!a_fGlobal)
646 pTlb->cTlsFlushes++;
647 else
648 pTlb->cTlsGlobalFlushes++;
649
650 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
651 if (RT_LIKELY(pTlb->uTlbRevision != 0))
652 { /* very likely */ }
653 else
654 {
655 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
656 pTlb->cTlbRevisionRollovers++;
657 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
658 while (i-- > 0)
659 pTlb->aEntries[i * 2].uTag = 0;
660 }
661 if (a_fGlobal)
662 {
663 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
664 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
665 { /* very likely */ }
666 else
667 {
668 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
669 pTlb->cTlbRevisionRollovers++;
670 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
671 while (i-- > 0)
672 pTlb->aEntries[i * 2 + 1].uTag = 0;
673 }
674 }
675}
676#endif
677
678
679/**
680 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
681 */
682template<bool a_fGlobal>
683DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
684{
685#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
686 Log10(("IEMTlbInvalidateAll\n"));
687
688# ifdef IEM_WITH_CODE_TLB
689 pVCpu->iem.s.cbInstrBufTotal = 0;
690 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
691# endif
692
693# ifdef IEM_WITH_DATA_TLB
694 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
695# endif
696#else
697 RT_NOREF(pVCpu);
698#endif
699}
700
701
702/**
703 * Invalidates the non-global IEM TLB entries.
704 *
705 * This is called internally as well as by PGM when moving GC mappings.
706 *
707 * @param pVCpu The cross context virtual CPU structure of the calling
708 * thread.
709 */
710VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
711{
712 iemTlbInvalidateAll<false>(pVCpu);
713}
714
715
716/**
717 * Invalidates all the IEM TLB entries.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 */
724VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
725{
726 iemTlbInvalidateAll<true>(pVCpu);
727}
728
729
730#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
731template<bool a_fDataTlb>
732DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
733{
734 /*
735 * Flush the entry pair.
736 *
737 * We ASSUME that the guest hasn't tricked us into loading one of these
738 * from a large page and the other from a regular 4KB page. This is made
739 * much less of a problem, in that the guest would also have to flip the
740 * G bit to accomplish this.
741 */
742 bool fMaybeLargePage = true;
743 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
744 {
745 pTlb->aEntries[idxEven].uTag = 0;
746 fMaybeLargePage = RT_BOOL(pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE);
747 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
748 pVCpu->iem.s.cbInstrBufTotal = 0;
749 }
750 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
751 {
752 pTlb->aEntries[idxEven + 1].uTag = 0;
753 fMaybeLargePage = RT_BOOL(pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE);
754 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756 }
757
758 /*
759 * If we cannot rule out a large page, we have to scan all the 4K TLB
760 * entries such a page covers to ensure we evict all relevant entries.
761 * ASSUMES that tag calculation is a right shift by GUEST_PAGE_SHIFT.
762 */
763 if (fMaybeLargePage)
764 {
765 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
766 RTGCPTR const GCPtrInstrBufPcTag = IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
767 if ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE) && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
768 {
769 /* 2MB large page */
770 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64(21 - GUEST_PAGE_SHIFT) - 1U);
771 RTGCPTR GCPtrTagGlob = GCPtrTag | pTlb->uTlbRevisionGlobal;
772 GCPtrTag |= pTlb->uTlbRevision;
773
774# if IEMTLB_ENTRY_COUNT >= 512
775 idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag);
776 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)0;
777 uintptr_t const idxEvenEnd = idxEven + 512;
778# else
779 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
780 & ~(RTGCPTR)( (RT_BIT_64(9 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U)
781 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
782 uintptr_t const idxEvenEnd = IEMTLB_ENTRY_COUNT;
783# endif
784 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
785 {
786 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
787 {
788 Assert(pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); /* bad guest */
789 pTlb->aEntries[idxEven].uTag = 0;
790 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
791 pVCpu->iem.s.cbInstrBufTotal = 0;
792 }
793 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
794 {
795 Assert(pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); /* bad guest */
796 pTlb->aEntries[idxEven + 1].uTag = 0;
797 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
798 pVCpu->iem.s.cbInstrBufTotal = 0;
799 }
800 GCPtrTag++;
801 GCPtrTagGlob++;
802 }
803 }
804 else
805 {
806 /* 4MB large page */
807 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64(22 - GUEST_PAGE_SHIFT) - 1U);
808 RTGCPTR GCPtrTagGlob = GCPtrTag | pTlb->uTlbRevisionGlobal;
809 GCPtrTag |= pTlb->uTlbRevision;
810
811# if IEMTLB_ENTRY_COUNT >= 1024
812 idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag);
813 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)0;
814 uintptr_t const idxEvenEnd = idxEven + 1024;
815# else
816 RTGCPTR const GCPtrTagMask = ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
817 & ~(RTGCPTR)( (RT_BIT_64(10 - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U)
818 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
819 uintptr_t const idxEvenEnd = IEMTLB_ENTRY_COUNT;
820# endif
821 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
822 {
823 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
824 {
825 Assert(pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); /* bad guest */
826 pTlb->aEntries[idxEven].uTag = 0;
827 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
828 pVCpu->iem.s.cbInstrBufTotal = 0;
829 }
830 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
831 {
832 Assert(pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); /* bad guest */
833 pTlb->aEntries[idxEven + 1].uTag = 0;
834 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
835 pVCpu->iem.s.cbInstrBufTotal = 0;
836 }
837 GCPtrTag++;
838 GCPtrTagGlob++;
839 }
840 }
841 }
842}
843#endif
844
845
846/**
847 * Invalidates a page in the TLBs.
848 *
849 * @param pVCpu The cross context virtual CPU structure of the calling
850 * thread.
851 * @param GCPtr The address of the page to invalidate
852 * @thread EMT(pVCpu)
853 */
854VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
855{
856#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
857 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
858 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
859 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
860 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
861
862# ifdef IEM_WITH_CODE_TLB
863 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
864# endif
865# ifdef IEM_WITH_DATA_TLB
866 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
867# endif
868#else
869 NOREF(pVCpu); NOREF(GCPtr);
870#endif
871}
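/*
 * A minimal sketch (not built) of the even/odd entry pairing the invalidation
 * above relies on: the even slot carries non-global entries tagged with
 * uTlbRevision, the odd slot carries global entries tagged with
 * uTlbRevisionGlobal. pTlb and GCPtr are hypothetical locals here.
 */
#if 0
    RTGCPTR const   uTagNoRev     = IEMTLB_CALC_TAG_NO_REV(GCPtr);       /* strip the page offset bits */
    uintptr_t const idxEven       = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev);
    bool const      fHitNonGlobal = pTlb->aEntries[idxEven].uTag     == (uTagNoRev | pTlb->uTlbRevision);
    bool const      fHitGlobal    = pTlb->aEntries[idxEven + 1].uTag == (uTagNoRev | pTlb->uTlbRevisionGlobal);
#endif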
872
873
874#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
875/**
876 * Invalidates both TLBs in a slow fashion following a rollover.
877 *
878 * Worker for IEMTlbInvalidateAllPhysical,
879 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
880 * iemMemMapJmp and others.
881 *
882 * @thread EMT(pVCpu)
883 */
884static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
885{
886 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
887 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
888 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
889
890 unsigned i;
891# ifdef IEM_WITH_CODE_TLB
892 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
893 while (i-- > 0)
894 {
895 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
896 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
897 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
898 }
899 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
900 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
901# endif
902# ifdef IEM_WITH_DATA_TLB
903 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
904 while (i-- > 0)
905 {
906 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
907 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
908 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
909 }
910 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
911 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
912# endif
913
914}
915#endif
916
917
918/**
919 * Invalidates the host physical aspects of the IEM TLBs.
920 *
921 * This is called internally as well as by PGM when moving GC mappings.
922 *
923 * @param pVCpu The cross context virtual CPU structure of the calling
924 * thread.
925 * @note Currently not used.
926 */
927VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
928{
929#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
930 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
931 Log10(("IEMTlbInvalidateAllPhysical\n"));
932
933# ifdef IEM_WITH_CODE_TLB
934 pVCpu->iem.s.cbInstrBufTotal = 0;
935# endif
936 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
937 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
938 {
939 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
940 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
941 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
942 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
943 }
944 else
945 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
946#else
947 NOREF(pVCpu);
948#endif
949}
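/*
 * Sketch of how the physical revision bumped above is consumed (mirrors the
 * code TLB fetch path later in this file): an entry's physical side is only
 * trusted while its IEMTLBE_F_PHYS_REV bits match the current uTlbPhysRev,
 * so incrementing the revision invalidates the GCPhys/pbMappingR3 info of
 * every entry without touching the tags. pTlb and pTlbe are hypothetical.
 */
#if 0
    if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev)
    { /* physical info still valid */ }
    else
    { /* stale: re-query via PGMPhysIemGCPhys2PtrNoLock() like the opcode fetch path does */ }
#endif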
950
951
952/**
953 * Invalidates the host physical aspects of the IEM TLBs.
954 *
955 * This is called internally as well as by PGM when moving GC mappings.
956 *
957 * @param pVM The cross context VM structure.
958 * @param idCpuCaller The ID of the calling EMT if available to the caller,
959 * otherwise NIL_VMCPUID.
960 * @param enmReason The reason we're called.
961 *
962 * @remarks Caller holds the PGM lock.
963 */
964VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
965{
966#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
967 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
968 if (pVCpuCaller)
969 VMCPU_ASSERT_EMT(pVCpuCaller);
970 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
971
972 VMCC_FOR_EACH_VMCPU(pVM)
973 {
974# ifdef IEM_WITH_CODE_TLB
975 if (pVCpuCaller == pVCpu)
976 pVCpu->iem.s.cbInstrBufTotal = 0;
977# endif
978
979 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
980 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
981 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
982 { /* likely */}
983 else if (pVCpuCaller != pVCpu)
984 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
985 else
986 {
987 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
988 continue;
989 }
990 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
991 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
992
993 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
994 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
995 }
996 VMCC_FOR_EACH_VMCPU_END(pVM);
997
998#else
999 RT_NOREF(pVM, idCpuCaller, enmReason);
1000#endif
1001}
1002
1003
1004/**
1005 * Flushes the prefetch buffer, light version.
1006 */
1007void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1008{
1009#ifndef IEM_WITH_CODE_TLB
1010 pVCpu->iem.s.cbOpcode = cbInstr;
1011#else
1012 RT_NOREF(pVCpu, cbInstr);
1013#endif
1014}
1015
1016
1017/**
1018 * Flushes the prefetch buffer, heavy version.
1019 */
1020void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1021{
1022#ifndef IEM_WITH_CODE_TLB
1023 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1024#elif 1
1025 pVCpu->iem.s.cbInstrBufTotal = 0;
1026 RT_NOREF(cbInstr);
1027#else
1028 RT_NOREF(pVCpu, cbInstr);
1029#endif
1030}
1031
1032
1033
1034#ifdef IEM_WITH_CODE_TLB
1035
1036/**
1037 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1038 * failure and jumps.
1039 *
1040 * We end up here for a number of reasons:
1041 * - pbInstrBuf isn't yet initialized.
1042 * - Advancing beyond the buffer boundary (e.g. cross page).
1043 * - Advancing beyond the CS segment limit.
1044 * - Fetching from non-mappable page (e.g. MMIO).
1045 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1046 *
1047 * @param pVCpu The cross context virtual CPU structure of the
1048 * calling thread.
1049 * @param pvDst Where to return the bytes.
1050 * @param cbDst Number of bytes to read. A value of zero is
1051 * allowed for initializing pbInstrBuf (the
1052 * recompiler does this). In this case it is best
1053 * to set pbInstrBuf to NULL prior to the call.
1054 */
1055void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1056{
1057# ifdef IN_RING3
1058 for (;;)
1059 {
1060 Assert(cbDst <= 8);
1061 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1062
1063 /*
1064 * We might have a partial buffer match, deal with that first to make the
1065 * rest simpler. This is the first part of the cross page/buffer case.
1066 */
1067 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1068 if (pbInstrBuf != NULL)
1069 {
1070 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1071 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1072 if (offBuf < cbInstrBuf)
1073 {
1074 Assert(offBuf + cbDst > cbInstrBuf);
1075 uint32_t const cbCopy = cbInstrBuf - offBuf;
1076 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1077
1078 cbDst -= cbCopy;
1079 pvDst = (uint8_t *)pvDst + cbCopy;
1080 offBuf += cbCopy;
1081 }
1082 }
1083
1084 /*
1085 * Check segment limit, figuring how much we're allowed to access at this point.
1086 *
1087 * We will fault immediately if RIP is past the segment limit / in non-canonical
1088 * territory. If we do continue, there are one or more bytes to read before we
1089 * end up in trouble and we need to do that first before faulting.
1090 */
1091 RTGCPTR GCPtrFirst;
1092 uint32_t cbMaxRead;
1093 if (IEM_IS_64BIT_CODE(pVCpu))
1094 {
1095 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1096 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1097 { /* likely */ }
1098 else
1099 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1100 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1101 }
1102 else
1103 {
1104 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1105 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1106 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1107 { /* likely */ }
1108 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1109 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1110 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1111 if (cbMaxRead != 0)
1112 { /* likely */ }
1113 else
1114 {
1115 /* Overflowed because address is 0 and limit is max. */
1116 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1117 cbMaxRead = X86_PAGE_SIZE;
1118 }
1119 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1120 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1121 if (cbMaxRead2 < cbMaxRead)
1122 cbMaxRead = cbMaxRead2;
1123 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1124 }
1125
1126 /*
1127 * Get the TLB entry for this piece of code.
1128 */
1129 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1130 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1131 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1132 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1133 {
1134 /* likely when executing lots of code, otherwise unlikely */
1135# ifdef IEM_WITH_TLB_STATISTICS
1136 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1137# endif
1138 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1139
1140 /* Check TLB page table level access flags. */
1141 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1142 {
1143 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1144 {
1145 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1146 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1147 }
1148 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1149 {
1150 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1151 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1152 }
1153 }
1154
1155 /* Look up the physical page info if necessary. */
1156 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1157 { /* not necessary */ }
1158 else
1159 {
1160 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1161 { /* likely */ }
1162 else
1163 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1164 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1165 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1166 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1167 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1168 }
1169 }
1170 else
1171 {
1172 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1173
1174 /* This page table walking will set A bits as required by the access while performing the walk.
1175 ASSUMES these are set when the address is translated rather than on commit... */
1176 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1177 PGMPTWALKFAST WalkFast;
1178 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1179 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1180 &WalkFast);
1181 if (RT_SUCCESS(rc))
1182 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1183 else
1184 {
1185#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1186 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1187 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1188#endif
1189 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1190 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1191 }
1192
1193 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1194 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1195 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1196 {
1197 pTlbe--;
1198 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1199 }
1200 else
1201 {
1202 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1203 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1204 }
1205 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1206 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1207 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1208 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1209 pTlbe->GCPhys = GCPhysPg;
1210 pTlbe->pbMappingR3 = NULL;
1211 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1212 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1213 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1214
1215 /* Resolve the physical address. */
1216 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1217 { /* likely */ }
1218 else
1219 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1220 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1221 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1222 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1223 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1224 }
1225
1226# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1227 /*
1228 * Try to do a direct read using the pbMappingR3 pointer.
1229 * Note! Do not recheck the physical TLB revision number here as we have the
1230 * wrong response to changes in the else case. If someone is updating
1231 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1232 * pretending we always won the race.
1233 */
1234 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1235 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1236 {
1237 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1238 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1239 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1240 {
1241 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1242 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1243 }
1244 else
1245 {
1246 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1247 if (cbInstr + (uint32_t)cbDst <= 15)
1248 {
1249 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1250 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1251 }
1252 else
1253 {
1254 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1255 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1256 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1257 }
1258 }
1259 if (cbDst <= cbMaxRead)
1260 {
1261 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1262 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1263
1264 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1265 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1266 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1267 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1268 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1269 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1270 else
1271 Assert(!pvDst);
1272 return;
1273 }
1274 pVCpu->iem.s.pbInstrBuf = NULL;
1275
1276 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1277 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1278 }
1279# else
1280# error "refactor as needed"
1281 /*
1282 * If there is no special read handling, we can read a bit more and
1283 * put it in the prefetch buffer.
1284 */
1285 if ( cbDst < cbMaxRead
1286 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1287 {
1288 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1289 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1290 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1291 { /* likely */ }
1292 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1293 {
1294 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1295 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1296 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1297 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));
1298 }
1299 else
1300 {
1301 Log((RT_SUCCESS(rcStrict)
1302 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1303 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1304 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1305 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1306 }
1307 }
1308# endif
1309 /*
1310 * Special read handling, so only read exactly what's needed.
1311 * This is a highly unlikely scenario.
1312 */
1313 else
1314 {
1315 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1316
1317 /* Check instruction length. */
1318 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1319 if (RT_LIKELY(cbInstr + cbDst <= 15))
1320 { /* likely */ }
1321 else
1322 {
1323 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1324 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1325 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1326 }
1327
1328 /* Do the reading. */
1329 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1330 if (cbToRead > 0)
1331 {
1332 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1333 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1334 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1335 { /* likely */ }
1336 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1337 {
1338 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1339 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1340 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1341 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1342 }
1343 else
1344 {
1345 Log((RT_SUCCESS(rcStrict)
1346 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1347 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1348 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1349 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1350 }
1351 }
1352
1353 /* Update the state and probably return. */
1354 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1355 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1356 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1357
1358 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1359 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1360 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1361 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1362 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1363 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1364 pVCpu->iem.s.pbInstrBuf = NULL;
1365 if (cbToRead == cbDst)
1366 return;
1367 Assert(cbToRead == cbMaxRead);
1368 }
1369
1370 /*
1371 * More to read, loop.
1372 */
1373 cbDst -= cbMaxRead;
1374 pvDst = (uint8_t *)pvDst + cbMaxRead;
1375 }
1376# else /* !IN_RING3 */
1377 RT_NOREF(pvDst, cbDst);
1378 if (pvDst || cbDst)
1379 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1380# endif /* !IN_RING3 */
1381}
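/*
 * Illustrative call patterns only, derived from the doc comment above; these
 * are not actual call sites. The decoder asks for real opcode bytes, while
 * the recompiler may pass cbDst=0/pvDst=NULL purely to (re)load pbInstrBuf
 * and the code TLB entry.
 */
# if 0
    uint8_t bOpcode;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(bOpcode), &bOpcode);   /* decoder: fetch one more opcode byte */

    pVCpu->iem.s.pbInstrBuf = NULL;                             /* best set to NULL before a pure TLB load */
    iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);                     /* recompiler: TLB / instruction buffer load only */
# endif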
1382
1383#else /* !IEM_WITH_CODE_TLB */
1384
1385/**
1386 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1387 * exception if it fails.
1388 *
1389 * @returns Strict VBox status code.
1390 * @param pVCpu The cross context virtual CPU structure of the
1391 * calling thread.
1392 * @param cbMin The minimum number of bytes relative to offOpcode
1393 * that must be read.
1394 */
1395VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1396{
1397 /*
1398 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1399 *
1400 * First translate CS:rIP to a physical address.
1401 */
1402 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1403 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1404 uint8_t const cbLeft = cbOpcode - offOpcode;
1405 Assert(cbLeft < cbMin);
1406 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1407
1408 uint32_t cbToTryRead;
1409 RTGCPTR GCPtrNext;
1410 if (IEM_IS_64BIT_CODE(pVCpu))
1411 {
1412 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1413 if (!IEM_IS_CANONICAL(GCPtrNext))
1414 return iemRaiseGeneralProtectionFault0(pVCpu);
1415 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1416 }
1417 else
1418 {
1419 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1420 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1421 GCPtrNext32 += cbOpcode;
1422 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1423 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1424 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1425 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1426 if (!cbToTryRead) /* overflowed */
1427 {
1428 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1429 cbToTryRead = UINT32_MAX;
1430 /** @todo check out wrapping around the code segment. */
1431 }
1432 if (cbToTryRead < cbMin - cbLeft)
1433 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1434 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1435
1436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1437 if (cbToTryRead > cbLeftOnPage)
1438 cbToTryRead = cbLeftOnPage;
1439 }
1440
1441 /* Restrict to opcode buffer space.
1442
1443 We're making ASSUMPTIONS here based on work done previously in
1444 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1445 be fetched in case of an instruction crossing two pages. */
1446 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1447 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1448 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1449 { /* likely */ }
1450 else
1451 {
1452 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1453 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1454 return iemRaiseGeneralProtectionFault0(pVCpu);
1455 }
1456
1457 PGMPTWALKFAST WalkFast;
1458 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1459 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1460 &WalkFast);
1461 if (RT_SUCCESS(rc))
1462 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1463 else
1464 {
1465 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1466#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1467 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1468 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1469#endif
1470 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1471 }
1472 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1473 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1474
1475 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1476 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1477
1478 /*
1479 * Read the bytes at this address.
1480 *
1481 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1482 * and since PATM should only patch the start of an instruction there
1483 * should be no need to check again here.
1484 */
1485 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1486 {
1487 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1488 cbToTryRead, PGMACCESSORIGIN_IEM);
1489 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1490 { /* likely */ }
1491 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1492 {
1493 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1494                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1496 }
1497 else
1498 {
1499 Log((RT_SUCCESS(rcStrict)
1500 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1501 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1502                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1503 return rcStrict;
1504 }
1505 }
1506 else
1507 {
1508 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1509 if (RT_SUCCESS(rc))
1510 { /* likely */ }
1511 else
1512 {
1513 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1514 return rc;
1515 }
1516 }
1517 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1518 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1519
1520 return VINF_SUCCESS;
1521}
1522
1523#endif /* !IEM_WITH_CODE_TLB */
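/*
 * Editor's sketch: the length clamping performed by iemOpcodeFetchMoreBytes
 * above, in isolation -- the read size is limited first by what remains on the
 * current guest page and then by the free space in the opcode buffer.  This is
 * a minimal standalone restatement, not part of IEM: it assumes the <stdint.h>
 * fixed-width types, uses 4096 as a stand-in for GUEST_PAGE_SIZE and 16 for
 * sizeof(abOpcode), and the function name is made up.
 */
static uint32_t iemSketchClampOpcodeRead(uint64_t GCPtrNext, uint32_t cbToTryRead, uint8_t cbOpcodeAlready)
{
    uint32_t const cbPage       = 4096;                                 /* GUEST_PAGE_SIZE stand-in */
    uint32_t const cbBuf        = 16;                                   /* sizeof(abOpcode) stand-in */
    uint32_t const cbLeftOnPage = cbPage - (uint32_t)(GCPtrNext & (cbPage - 1));
    if (cbToTryRead > cbLeftOnPage)                         /* never read past the end of the current page */
        cbToTryRead = cbLeftOnPage;
    if (cbToTryRead > (uint32_t)(cbBuf - cbOpcodeAlready))  /* never overflow the opcode buffer */
        cbToTryRead = cbBuf - cbOpcodeAlready;
    return cbToTryRead;
}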
1524#ifndef IEM_WITH_SETJMP
1525
1526/**
1527 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1528 *
1529 * @returns Strict VBox status code.
1530 * @param pVCpu The cross context virtual CPU structure of the
1531 * calling thread.
1532 * @param pb Where to return the opcode byte.
1533 */
1534VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1535{
1536 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1537 if (rcStrict == VINF_SUCCESS)
1538 {
1539 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1540 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1541 pVCpu->iem.s.offOpcode = offOpcode + 1;
1542 }
1543 else
1544 *pb = 0;
1545 return rcStrict;
1546}
1547
1548#else /* IEM_WITH_SETJMP */
1549
1550/**
1551 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1552 *
1553 * @returns The opcode byte.
1554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1555 */
1556uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1557{
1558# ifdef IEM_WITH_CODE_TLB
1559 uint8_t u8;
1560 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1561 return u8;
1562# else
1563 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1564 if (rcStrict == VINF_SUCCESS)
1565 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1566 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1567# endif
1568}
1569
1570#endif /* IEM_WITH_SETJMP */
1571
1572#ifndef IEM_WITH_SETJMP
1573
1574/**
1575 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1576 *
1577 * @returns Strict VBox status code.
1578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1579 * @param   pu16                Where to return the opcode word.
1580 */
1581VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1582{
1583 uint8_t u8;
1584 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1585 if (rcStrict == VINF_SUCCESS)
1586 *pu16 = (int8_t)u8;
1587 return rcStrict;
1588}
1589
1590
1591/**
1592 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1593 *
1594 * @returns Strict VBox status code.
1595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1596 * @param pu32 Where to return the opcode dword.
1597 */
1598VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1599{
1600 uint8_t u8;
1601 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1602 if (rcStrict == VINF_SUCCESS)
1603 *pu32 = (int8_t)u8;
1604 return rcStrict;
1605}
1606
1607
1608/**
1609 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1610 *
1611 * @returns Strict VBox status code.
1612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1613 * @param pu64 Where to return the opcode qword.
1614 */
1615VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1616{
1617 uint8_t u8;
1618 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1619 if (rcStrict == VINF_SUCCESS)
1620 *pu64 = (int8_t)u8;
1621 return rcStrict;
1622}
1623
1624#endif /* !IEM_WITH_SETJMP */
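/*
 * Editor's sketch: what the iemOpcodeGetNextS8Sx*Slow helpers above compute.
 * The opcode byte is reinterpreted as a signed 8-bit value before widening, so
 * the sign bit is replicated into the upper bits of the destination.
 * Standalone illustration only; assumes the <stdint.h> fixed-width types and
 * an invented function name.
 */
static void iemSketchSignExtendOpcodeByte(uint8_t u8, uint16_t *pu16, uint32_t *pu32, uint64_t *pu64)
{
    *pu16 = (uint16_t)(int8_t)u8;       /* 0xf0 -> 0xfff0 */
    *pu32 = (uint32_t)(int8_t)u8;       /* 0xf0 -> 0xfffffff0 */
    *pu64 = (uint64_t)(int8_t)u8;       /* 0xf0 -> 0xfffffffffffffff0 */
}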
1625
1626
1627#ifndef IEM_WITH_SETJMP
1628
1629/**
1630 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1631 *
1632 * @returns Strict VBox status code.
1633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1634 * @param pu16 Where to return the opcode word.
1635 */
1636VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1637{
1638 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1639 if (rcStrict == VINF_SUCCESS)
1640 {
1641 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1642# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1643 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1644# else
1645 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1646# endif
1647 pVCpu->iem.s.offOpcode = offOpcode + 2;
1648 }
1649 else
1650 *pu16 = 0;
1651 return rcStrict;
1652}
1653
1654#else /* IEM_WITH_SETJMP */
1655
1656/**
1657 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1658 *
1659 * @returns The opcode word.
1660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1661 */
1662uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1663{
1664# ifdef IEM_WITH_CODE_TLB
1665 uint16_t u16;
1666 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1667 return u16;
1668# else
1669 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1670 if (rcStrict == VINF_SUCCESS)
1671 {
1672 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1673 pVCpu->iem.s.offOpcode += 2;
1674# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1675 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1676# else
1677 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1678# endif
1679 }
1680 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1681# endif
1682}
1683
1684#endif /* IEM_WITH_SETJMP */
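/*
 * Editor's sketch: the byte-order handling used by the slow opcode readers.
 * When IEM_USE_UNALIGNED_DATA_ACCESS is not defined the value is assembled
 * explicitly from individual bytes in little-endian order, which is what the
 * RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 invocations above amount to.  Standalone
 * illustration only; assumes <stdint.h> types and invented function names.
 */
static uint16_t iemSketchReadU16LittleEndian(uint8_t const *pab)
{
    return (uint16_t)(pab[0] | ((uint16_t)pab[1] << 8));            /* low byte first */
}

static uint32_t iemSketchReadU32LittleEndian(uint8_t const *pab)
{
    return (uint32_t)pab[0]
         | ((uint32_t)pab[1] <<  8)
         | ((uint32_t)pab[2] << 16)
         | ((uint32_t)pab[3] << 24);
}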
1685
1686#ifndef IEM_WITH_SETJMP
1687
1688/**
1689 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1690 *
1691 * @returns Strict VBox status code.
1692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1693 * @param pu32 Where to return the opcode double word.
1694 */
1695VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1696{
1697 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1698 if (rcStrict == VINF_SUCCESS)
1699 {
1700 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1701 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1702 pVCpu->iem.s.offOpcode = offOpcode + 2;
1703 }
1704 else
1705 *pu32 = 0;
1706 return rcStrict;
1707}
1708
1709
1710/**
1711 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1712 *
1713 * @returns Strict VBox status code.
1714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1715 * @param pu64 Where to return the opcode quad word.
1716 */
1717VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1718{
1719 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1720 if (rcStrict == VINF_SUCCESS)
1721 {
1722 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1723 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1724 pVCpu->iem.s.offOpcode = offOpcode + 2;
1725 }
1726 else
1727 *pu64 = 0;
1728 return rcStrict;
1729}
1730
1731#endif /* !IEM_WITH_SETJMP */
1732
1733#ifndef IEM_WITH_SETJMP
1734
1735/**
1736 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1737 *
1738 * @returns Strict VBox status code.
1739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1740 * @param pu32 Where to return the opcode dword.
1741 */
1742VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1743{
1744 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1745 if (rcStrict == VINF_SUCCESS)
1746 {
1747 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1748# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1749 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1750# else
1751 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1752 pVCpu->iem.s.abOpcode[offOpcode + 1],
1753 pVCpu->iem.s.abOpcode[offOpcode + 2],
1754 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1755# endif
1756 pVCpu->iem.s.offOpcode = offOpcode + 4;
1757 }
1758 else
1759 *pu32 = 0;
1760 return rcStrict;
1761}
1762
1763#else /* IEM_WITH_SETJMP */
1764
1765/**
1766 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1767 *
1768 * @returns The opcode dword.
1769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1770 */
1771uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1772{
1773# ifdef IEM_WITH_CODE_TLB
1774 uint32_t u32;
1775 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1776 return u32;
1777# else
1778 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1779 if (rcStrict == VINF_SUCCESS)
1780 {
1781 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1782 pVCpu->iem.s.offOpcode = offOpcode + 4;
1783# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1784 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1785# else
1786 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1787 pVCpu->iem.s.abOpcode[offOpcode + 1],
1788 pVCpu->iem.s.abOpcode[offOpcode + 2],
1789 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1790# endif
1791 }
1792 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1793# endif
1794}
1795
1796#endif /* IEM_WITH_SETJMP */
1797
1798#ifndef IEM_WITH_SETJMP
1799
1800/**
1801 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1802 *
1803 * @returns Strict VBox status code.
1804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1805 * @param   pu64                Where to return the opcode qword.
1806 */
1807VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1808{
1809 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1810 if (rcStrict == VINF_SUCCESS)
1811 {
1812 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1813 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1814 pVCpu->iem.s.abOpcode[offOpcode + 1],
1815 pVCpu->iem.s.abOpcode[offOpcode + 2],
1816 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1817 pVCpu->iem.s.offOpcode = offOpcode + 4;
1818 }
1819 else
1820 *pu64 = 0;
1821 return rcStrict;
1822}
1823
1824
1825/**
1826 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1827 *
1828 * @returns Strict VBox status code.
1829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1830 * @param pu64 Where to return the opcode qword.
1831 */
1832VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1833{
1834 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1835 if (rcStrict == VINF_SUCCESS)
1836 {
1837 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1838 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1839 pVCpu->iem.s.abOpcode[offOpcode + 1],
1840 pVCpu->iem.s.abOpcode[offOpcode + 2],
1841 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1842 pVCpu->iem.s.offOpcode = offOpcode + 4;
1843 }
1844 else
1845 *pu64 = 0;
1846 return rcStrict;
1847}
1848
1849#endif /* !IEM_WITH_SETJMP */
1850
1851#ifndef IEM_WITH_SETJMP
1852
1853/**
1854 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1855 *
1856 * @returns Strict VBox status code.
1857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1858 * @param pu64 Where to return the opcode qword.
1859 */
1860VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1861{
1862 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1863 if (rcStrict == VINF_SUCCESS)
1864 {
1865 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1866# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1867 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1868# else
1869 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1870 pVCpu->iem.s.abOpcode[offOpcode + 1],
1871 pVCpu->iem.s.abOpcode[offOpcode + 2],
1872 pVCpu->iem.s.abOpcode[offOpcode + 3],
1873 pVCpu->iem.s.abOpcode[offOpcode + 4],
1874 pVCpu->iem.s.abOpcode[offOpcode + 5],
1875 pVCpu->iem.s.abOpcode[offOpcode + 6],
1876 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1877# endif
1878 pVCpu->iem.s.offOpcode = offOpcode + 8;
1879 }
1880 else
1881 *pu64 = 0;
1882 return rcStrict;
1883}
1884
1885#else /* IEM_WITH_SETJMP */
1886
1887/**
1888 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1889 *
1890 * @returns The opcode qword.
1891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1892 */
1893uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1894{
1895# ifdef IEM_WITH_CODE_TLB
1896 uint64_t u64;
1897 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1898 return u64;
1899# else
1900 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1901 if (rcStrict == VINF_SUCCESS)
1902 {
1903 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1904 pVCpu->iem.s.offOpcode = offOpcode + 8;
1905# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1906 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1907# else
1908 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1909 pVCpu->iem.s.abOpcode[offOpcode + 1],
1910 pVCpu->iem.s.abOpcode[offOpcode + 2],
1911 pVCpu->iem.s.abOpcode[offOpcode + 3],
1912 pVCpu->iem.s.abOpcode[offOpcode + 4],
1913 pVCpu->iem.s.abOpcode[offOpcode + 5],
1914 pVCpu->iem.s.abOpcode[offOpcode + 6],
1915 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1916# endif
1917 }
1918 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1919# endif
1920}
1921
1922#endif /* IEM_WITH_SETJMP */
1923
1924
1925
1926/** @name Misc Worker Functions.
1927 * @{
1928 */
1929
1930/**
1931 * Gets the exception class for the specified exception vector.
1932 *
1933 * @returns The class of the specified exception.
1934 * @param uVector The exception vector.
1935 */
1936static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1937{
1938 Assert(uVector <= X86_XCPT_LAST);
1939 switch (uVector)
1940 {
1941 case X86_XCPT_DE:
1942 case X86_XCPT_TS:
1943 case X86_XCPT_NP:
1944 case X86_XCPT_SS:
1945 case X86_XCPT_GP:
1946 case X86_XCPT_SX: /* AMD only */
1947 return IEMXCPTCLASS_CONTRIBUTORY;
1948
1949 case X86_XCPT_PF:
1950 case X86_XCPT_VE: /* Intel only */
1951 return IEMXCPTCLASS_PAGE_FAULT;
1952
1953 case X86_XCPT_DF:
1954 return IEMXCPTCLASS_DOUBLE_FAULT;
1955 }
1956 return IEMXCPTCLASS_BENIGN;
1957}
1958
1959
1960/**
1961 * Evaluates how to handle an exception caused during delivery of another event
1962 * (exception / interrupt).
1963 *
1964 * @returns How to handle the recursive exception.
1965 * @param pVCpu The cross context virtual CPU structure of the
1966 * calling thread.
1967 * @param fPrevFlags The flags of the previous event.
1968 * @param uPrevVector The vector of the previous event.
1969 * @param fCurFlags The flags of the current exception.
1970 * @param uCurVector The vector of the current exception.
1971 * @param pfXcptRaiseInfo Where to store additional information about the
1972 * exception condition. Optional.
1973 */
1974VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1975 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1976{
1977 /*
1978      * Only CPU exceptions can be raised while delivering other events; software interrupt
1979      * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1980 */
1981 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1982 Assert(pVCpu); RT_NOREF(pVCpu);
1983 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1984
1985 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1986 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1987 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1988 {
1989 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1990 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1991 {
1992 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1993 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1994 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1995 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1996 {
1997 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1998 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1999 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2000 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2001 uCurVector, pVCpu->cpum.GstCtx.cr2));
2002 }
2003 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2004 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2005 {
2006 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2007 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2008 }
2009 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2010 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2011 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2012 {
2013 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2014 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2015 }
2016 }
2017 else
2018 {
2019 if (uPrevVector == X86_XCPT_NMI)
2020 {
2021 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2022 if (uCurVector == X86_XCPT_PF)
2023 {
2024 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2025 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2026 }
2027 }
2028 else if ( uPrevVector == X86_XCPT_AC
2029 && uCurVector == X86_XCPT_AC)
2030 {
2031 enmRaise = IEMXCPTRAISE_CPU_HANG;
2032 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2033 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2034 }
2035 }
2036 }
2037 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2038 {
2039 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2040 if (uCurVector == X86_XCPT_PF)
2041 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2042 }
2043 else
2044 {
2045 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2046 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2047 }
2048
2049 if (pfXcptRaiseInfo)
2050 *pfXcptRaiseInfo = fRaiseInfo;
2051 return enmRaise;
2052}
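/*
 * Editor's note: the decision above follows the classic benign / contributory
 * / page-fault table from the Intel and AMD manuals, which in condensed form
 * reads (prev = event being delivered, cur = exception raised during its
 * delivery; "cur" means the current exception is simply delivered):
 *
 *      prev \ cur    | benign | contributory | page fault
 *      --------------+--------+--------------+-------------
 *      benign        |  cur   |     cur      |    cur
 *      contributory  |  cur   |     #DF      |    cur
 *      page fault    |  cur   |     #DF      |    #DF
 *      double fault  |  cur   | triple fault | triple fault
 *
 * A minimal sketch of just that table, using the IEMXCPTCLASS values returned
 * by iemGetXcptClass above (the function name and the 0/1/2 result encoding
 * are illustrative only, not IEM interfaces):
 */
static int iemSketchMergeXcptClasses(IEMXCPTCLASS enmPrev, IEMXCPTCLASS enmCur)
{
    if (   enmPrev == IEMXCPTCLASS_PAGE_FAULT
        && (enmCur == IEMXCPTCLASS_PAGE_FAULT || enmCur == IEMXCPTCLASS_CONTRIBUTORY))
        return 1;   /* raise #DF */
    if (enmPrev == IEMXCPTCLASS_CONTRIBUTORY && enmCur == IEMXCPTCLASS_CONTRIBUTORY)
        return 1;   /* raise #DF */
    if (   enmPrev == IEMXCPTCLASS_DOUBLE_FAULT
        && (enmCur == IEMXCPTCLASS_CONTRIBUTORY || enmCur == IEMXCPTCLASS_PAGE_FAULT))
        return 2;   /* triple fault -> CPU shutdown */
    return 0;       /* deliver the current exception as-is */
}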
2053
2054
2055/**
2056 * Enters the CPU shutdown state initiated by a triple fault or other
2057 * unrecoverable conditions.
2058 *
2059 * @returns Strict VBox status code.
2060 * @param pVCpu The cross context virtual CPU structure of the
2061 * calling thread.
2062 */
2063static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2064{
2065 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2066 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2067
2068 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2069 {
2070 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2071 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2072 }
2073
2074 RT_NOREF(pVCpu);
2075 return VINF_EM_TRIPLE_FAULT;
2076}
2077
2078
2079/**
2080 * Validates a new SS segment.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the
2084 * calling thread.
2085 * @param   NewSS           The new SS selector.
2086 * @param uCpl The CPL to load the stack for.
2087 * @param pDesc Where to return the descriptor.
2088 */
2089static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2090{
2091 /* Null selectors are not allowed (we're not called for dispatching
2092 interrupts with SS=0 in long mode). */
2093 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2094 {
2095 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2096 return iemRaiseTaskSwitchFault0(pVCpu);
2097 }
2098
2099 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2100 if ((NewSS & X86_SEL_RPL) != uCpl)
2101 {
2102 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2103 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2104 }
2105
2106 /*
2107 * Read the descriptor.
2108 */
2109 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2110 if (rcStrict != VINF_SUCCESS)
2111 return rcStrict;
2112
2113 /*
2114 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2115 */
2116 if (!pDesc->Legacy.Gen.u1DescType)
2117 {
2118 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2119 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2120 }
2121
2122 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2123 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2124 {
2125 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2126 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2127 }
2128 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2129 {
2130 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2131 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2132 }
2133
2134 /* Is it there? */
2135 /** @todo testcase: Is this checked before the canonical / limit check below? */
2136 if (!pDesc->Legacy.Gen.u1Present)
2137 {
2138 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2139 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2140 }
2141
2142 return VINF_SUCCESS;
2143}
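/*
 * Editor's sketch: the SS validation above condensed into a single predicate
 * over plain values instead of the descriptor structure.  A stack selector is
 * acceptable when it is non-null, its RPL and the descriptor DPL both equal
 * the target CPL, it refers to a writable data segment, and the segment is
 * present (a missing segment raises #NP rather than #TS).  Standalone
 * illustration only; the name and parameters are invented.
 */
static int iemSketchIsSsAcceptable(uint16_t uSel, uint8_t uCpl,
                                   int fCodeSeg, int fWritable, uint8_t uDpl, int fPresent)
{
    if (!(uSel & 0xfff8))           /* null selector (X86_SEL_MASK_OFF_RPL) */
        return 0;
    if ((uSel & 0x3) != uCpl)       /* RPL must equal the CPL */
        return 0;
    if (fCodeSeg || !fWritable)     /* must be a writable data segment */
        return 0;
    if (uDpl != uCpl)               /* DPL must equal the CPL */
        return 0;
    return fPresent;                /* and finally it must be present */
}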
2144
2145/** @} */
2146
2147
2148/** @name Raising Exceptions.
2149 *
2150 * @{
2151 */
2152
2153
2154/**
2155 * Loads the specified stack far pointer from the TSS.
2156 *
2157 * @returns VBox strict status code.
2158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2159 * @param uCpl The CPL to load the stack for.
2160 * @param pSelSS Where to return the new stack segment.
2161 * @param puEsp Where to return the new stack pointer.
2162 */
2163static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2164{
2165 VBOXSTRICTRC rcStrict;
2166 Assert(uCpl < 4);
2167
2168 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2169 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2170 {
2171 /*
2172 * 16-bit TSS (X86TSS16).
2173 */
2174 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2175 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2176 {
2177 uint32_t off = uCpl * 4 + 2;
2178 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2179 {
2180 /** @todo check actual access pattern here. */
2181 uint32_t u32Tmp = 0; /* gcc maybe... */
2182 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2183 if (rcStrict == VINF_SUCCESS)
2184 {
2185 *puEsp = RT_LOWORD(u32Tmp);
2186 *pSelSS = RT_HIWORD(u32Tmp);
2187 return VINF_SUCCESS;
2188 }
2189 }
2190 else
2191 {
2192 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2193 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2194 }
2195 break;
2196 }
2197
2198 /*
2199 * 32-bit TSS (X86TSS32).
2200 */
2201 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2202 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2203 {
2204 uint32_t off = uCpl * 8 + 4;
2205 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2206 {
2207                /** @todo check actual access pattern here. */
2208 uint64_t u64Tmp;
2209 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2210 if (rcStrict == VINF_SUCCESS)
2211 {
2212 *puEsp = u64Tmp & UINT32_MAX;
2213 *pSelSS = (RTSEL)(u64Tmp >> 32);
2214 return VINF_SUCCESS;
2215 }
2216 }
2217 else
2218 {
2219                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2220 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2221 }
2222 break;
2223 }
2224
2225 default:
2226 AssertFailed();
2227 rcStrict = VERR_IEM_IPE_4;
2228 break;
2229 }
2230
2231 *puEsp = 0; /* make gcc happy */
2232 *pSelSS = 0; /* make gcc happy */
2233 return rcStrict;
2234}
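/*
 * Editor's sketch: the only data consulted above is the SS:(E)SP pair for the
 * target privilege level, so the lookup boils down to an offset computation
 * into the TSS -- 16-bit TSSes keep 2+2 byte SP/SS pairs starting at offset
 * 0x02, 32-bit TSSes keep 4+4 byte ESP/SS pairs starting at offset 0x04.
 * Standalone illustration only; assumes <stdint.h> and an invented name.
 */
static uint32_t iemSketchTssStackSlotOffset(uint8_t uCpl, int fIs386Tss)
{
    return fIs386Tss
         ? (uint32_t)uCpl * 8 + 4   /* X86TSS32: esp0 at 0x04, ss0 at 0x08, esp1 at 0x0c, ... */
         : (uint32_t)uCpl * 4 + 2;  /* X86TSS16: sp0  at 0x02, ss0 at 0x04, sp1  at 0x06, ... */
}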
2235
2236
2237/**
2238 * Loads the specified stack pointer from the 64-bit TSS.
2239 *
2240 * @returns VBox strict status code.
2241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2242 * @param uCpl The CPL to load the stack for.
2243 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2244 * @param puRsp Where to return the new stack pointer.
2245 */
2246static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2247{
2248 Assert(uCpl < 4);
2249 Assert(uIst < 8);
2250 *puRsp = 0; /* make gcc happy */
2251
2252 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2253 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2254
2255 uint32_t off;
2256 if (uIst)
2257 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2258 else
2259 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2260 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2261 {
2262 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2263 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2264 }
2265
2266 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2267}
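/*
 * Editor's sketch: the corresponding offset computation for the 64-bit TSS
 * used above.  RSP0..RSP2 live at offsets 0x04/0x0c/0x14 and IST1..IST7 at
 * 0x24..0x54, and a non-zero IST index takes precedence over the CPL-based
 * slot.  Standalone illustration only; assumes <stdint.h> and an invented
 * name (the literals correspond to RT_UOFFSETOF(X86TSS64, rsp0/ist1)).
 */
static uint32_t iemSketchTss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)
        return (uint32_t)(uIst - 1) * 8 + 0x24;     /* IST1 starts at offset 0x24 */
    return (uint32_t)uCpl * 8 + 0x04;               /* RSP0 starts at offset 0x04 */
}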
2268
2269
2270/**
2271 * Adjust the CPU state according to the exception being raised.
2272 *
2273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2274 * @param u8Vector The exception that has been raised.
2275 */
2276DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2277{
2278 switch (u8Vector)
2279 {
2280 case X86_XCPT_DB:
2281 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2282 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2283 break;
2284 /** @todo Read the AMD and Intel exception reference... */
2285 }
2286}
2287
2288
2289/**
2290 * Implements exceptions and interrupts for real mode.
2291 *
2292 * @returns VBox strict status code.
2293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2294 * @param cbInstr The number of bytes to offset rIP by in the return
2295 * address.
2296 * @param u8Vector The interrupt / exception vector number.
2297 * @param fFlags The flags.
2298 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2299 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2300 */
2301static VBOXSTRICTRC
2302iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2303 uint8_t cbInstr,
2304 uint8_t u8Vector,
2305 uint32_t fFlags,
2306 uint16_t uErr,
2307 uint64_t uCr2) RT_NOEXCEPT
2308{
2309 NOREF(uErr); NOREF(uCr2);
2310 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2311
2312 /*
2313 * Read the IDT entry.
2314 */
2315 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2316 {
2317 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2318 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2319 }
2320 RTFAR16 Idte;
2321 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2322 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2323 {
2324 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327
2328#ifdef LOG_ENABLED
2329 /* If software interrupt, try decode it if logging is enabled and such. */
2330 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2331 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2332 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2333#endif
2334
2335 /*
2336 * Push the stack frame.
2337 */
2338 uint8_t bUnmapInfo;
2339 uint16_t *pu16Frame;
2340 uint64_t uNewRsp;
2341 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2342 if (rcStrict != VINF_SUCCESS)
2343 return rcStrict;
2344
2345 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2346#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2347 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2348 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2349 fEfl |= UINT16_C(0xf000);
2350#endif
2351 pu16Frame[2] = (uint16_t)fEfl;
2352 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2353 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2354 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2355 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2356 return rcStrict;
2357
2358 /*
2359 * Load the vector address into cs:ip and make exception specific state
2360 * adjustments.
2361 */
2362 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2363 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2364 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2365 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2366 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2367 pVCpu->cpum.GstCtx.rip = Idte.off;
2368 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2369 IEMMISC_SET_EFL(pVCpu, fEfl);
2370
2371 /** @todo do we actually do this in real mode? */
2372 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2373 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2374
2375 /*
2376 * Deal with debug events that follows the exception and clear inhibit flags.
2377 */
2378 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2379 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2380 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2381 else
2382 {
2383 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2384 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2385 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2386 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2387 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2388 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2389 return iemRaiseDebugException(pVCpu);
2390 }
2391
2392     /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2393        so it's best to leave them alone in case we're in a weird kind of real mode... */
2394
2395 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2396}
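/*
 * Editor's sketch: the real-mode dispatch above, reduced to its address
 * arithmetic.  The IDT entry for a vector is the 4-byte IP:CS far pointer at
 * idtr.base + vector * 4, the CPU pushes a 6-byte frame (IP at [sp], CS at
 * [sp+2], FLAGS at [sp+4], matching pu16Frame[0..2] above), and the handler
 * then runs at linear address (CS << 4) + IP.  Standalone illustration only;
 * assumes <stdint.h> and invented names.
 */
static uint32_t iemSketchRealModeIdteAddr(uint32_t uIdtBase, uint8_t bVector)
{
    return uIdtBase + (uint32_t)bVector * 4;            /* where IP (word) then CS (word) are fetched */
}

static uint32_t iemSketchRealModeHandlerAddr(uint16_t uHandlerCs, uint16_t uHandlerIp)
{
    return ((uint32_t)uHandlerCs << 4) + uHandlerIp;    /* real-mode linear address of CS:IP */
}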
2397
2398
2399/**
2400 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2401 *
2402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2403 * @param pSReg Pointer to the segment register.
2404 */
2405DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2406{
2407 pSReg->Sel = 0;
2408 pSReg->ValidSel = 0;
2409 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2410 {
2411 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2412 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2413 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2414 }
2415 else
2416 {
2417 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2418 /** @todo check this on AMD-V */
2419 pSReg->u64Base = 0;
2420 pSReg->u32Limit = 0;
2421 }
2422}
2423
2424
2425/**
2426 * Loads a segment selector during a task switch in V8086 mode.
2427 *
2428 * @param pSReg Pointer to the segment register.
2429 * @param uSel The selector value to load.
2430 */
2431DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2432{
2433 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2434 pSReg->Sel = uSel;
2435 pSReg->ValidSel = uSel;
2436 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2437 pSReg->u64Base = uSel << 4;
2438 pSReg->u32Limit = 0xffff;
2439 pSReg->Attr.u = 0xf3;
2440}
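/*
 * Editor's note: a tiny worked example of the V8086 selector load above.
 * Loading 0xb800 gives base 0x000b8000, limit 0xffff and attributes 0xf3
 * (present, DPL=3, read/write data, accessed), i.e. the flat 64 KiB window
 * that real-mode style code expects.  Standalone illustration only; the name
 * is invented and <stdint.h> types are assumed.
 */
static uint32_t iemSketchV86SegBase(uint16_t uSel)
{
    return (uint32_t)uSel << 4;     /* 0xb800 -> 0x000b8000 */
}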
2441
2442
2443/**
2444 * Loads a segment selector during a task switch in protected mode.
2445 *
2446 * In this task switch scenario, we would throw \#TS exceptions rather than
2447 * \#GPs.
2448 *
2449 * @returns VBox strict status code.
2450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2451 * @param pSReg Pointer to the segment register.
2452 * @param uSel The new selector value.
2453 *
2454 * @remarks This does _not_ handle CS or SS.
2455 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2456 */
2457static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2458{
2459 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2460
2461 /* Null data selector. */
2462 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2463 {
2464 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2465 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2466 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2467 return VINF_SUCCESS;
2468 }
2469
2470 /* Fetch the descriptor. */
2471 IEMSELDESC Desc;
2472 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2473 if (rcStrict != VINF_SUCCESS)
2474 {
2475 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2476 VBOXSTRICTRC_VAL(rcStrict)));
2477 return rcStrict;
2478 }
2479
2480 /* Must be a data segment or readable code segment. */
2481 if ( !Desc.Legacy.Gen.u1DescType
2482 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2483 {
2484 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2485 Desc.Legacy.Gen.u4Type));
2486 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2487 }
2488
2489 /* Check privileges for data segments and non-conforming code segments. */
2490 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2491 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2492 {
2493 /* The RPL and the new CPL must be less than or equal to the DPL. */
2494 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2495 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2496 {
2497 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2498 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2499 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2500 }
2501 }
2502
2503 /* Is it there? */
2504 if (!Desc.Legacy.Gen.u1Present)
2505 {
2506 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2507 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2508 }
2509
2510 /* The base and limit. */
2511 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2512 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2513
2514 /*
2515 * Ok, everything checked out fine. Now set the accessed bit before
2516 * committing the result into the registers.
2517 */
2518 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2519 {
2520 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2521 if (rcStrict != VINF_SUCCESS)
2522 return rcStrict;
2523 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2524 }
2525
2526 /* Commit */
2527 pSReg->Sel = uSel;
2528 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2529 pSReg->u32Limit = cbLimit;
2530 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2531 pSReg->ValidSel = uSel;
2532 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2533 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2534 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2535
2536 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2537 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2538 return VINF_SUCCESS;
2539}
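/*
 * Editor's note: the privilege rule here is deliberately weaker than the SS
 * check in iemMiscValidateNewSS above -- for data and non-conforming readable
 * code segments loaded during a task switch, both the selector RPL and the
 * new CPL only need to be <= DPL, while conforming code segments skip the
 * check entirely.  A minimal sketch of just that rule (invented name):
 */
static int iemSketchTaskSwitchDataSegPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl, int fConformingCode)
{
    if (fConformingCode)            /* conforming code: no DPL check */
        return 1;
    return uRpl <= uDpl && uCpl <= uDpl;
}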
2540
2541
2542/**
2543 * Performs a task switch.
2544 *
2545 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2546 * caller is responsible for performing the necessary checks (like DPL, TSS
2547 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2548 * reference for JMP, CALL, IRET.
2549 *
2550 * If the task switch is due to a software interrupt or hardware exception,
2551 * the caller is responsible for validating the TSS selector and descriptor. See
2552 * Intel Instruction reference for INT n.
2553 *
2554 * @returns VBox strict status code.
2555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2556 * @param enmTaskSwitch The cause of the task switch.
2557 * @param uNextEip The EIP effective after the task switch.
2558 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2559 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2560 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2561 * @param SelTss The TSS selector of the new task.
2562 * @param pNewDescTss Pointer to the new TSS descriptor.
2563 */
2564VBOXSTRICTRC
2565iemTaskSwitch(PVMCPUCC pVCpu,
2566 IEMTASKSWITCH enmTaskSwitch,
2567 uint32_t uNextEip,
2568 uint32_t fFlags,
2569 uint16_t uErr,
2570 uint64_t uCr2,
2571 RTSEL SelTss,
2572 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2573{
2574 Assert(!IEM_IS_REAL_MODE(pVCpu));
2575 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2576 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2577
2578 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2579 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2580 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2581 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2582 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2583
2584 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2585 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2586
2587 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2588 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2589
2590 /* Update CR2 in case it's a page-fault. */
2591 /** @todo This should probably be done much earlier in IEM/PGM. See
2592 * @bugref{5653#c49}. */
2593 if (fFlags & IEM_XCPT_FLAGS_CR2)
2594 pVCpu->cpum.GstCtx.cr2 = uCr2;
2595
2596 /*
2597 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2598 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2599 */
2600 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2601 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2602 if (uNewTssLimit < uNewTssLimitMin)
2603 {
2604 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2605 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2606 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2607 }
2608
2609 /*
2610     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2611 * The new TSS must have been read and validated (DPL, limits etc.) before a
2612 * task-switch VM-exit commences.
2613 *
2614 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2615 */
2616 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2617 {
2618 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2619 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2620 }
2621
2622 /*
2623 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2624 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2625 */
2626 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2627 {
2628 uint64_t const uExitInfo1 = SelTss;
2629 uint64_t uExitInfo2 = uErr;
2630 switch (enmTaskSwitch)
2631 {
2632 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2633 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2634 default: break;
2635 }
2636 if (fFlags & IEM_XCPT_FLAGS_ERR)
2637 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2638 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2639 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2640
2641 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2642 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2643 RT_NOREF2(uExitInfo1, uExitInfo2);
2644 }
2645
2646 /*
2647     * Check the current TSS limit. The last write to the current TSS during the
2648     * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2649 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2650 *
2651     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2652 * end up with smaller than "legal" TSS limits.
2653 */
2654 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2655 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2656 if (uCurTssLimit < uCurTssLimitMin)
2657 {
2658 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2659 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2660 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2661 }
2662
2663 /*
2664 * Verify that the new TSS can be accessed and map it. Map only the required contents
2665 * and not the entire TSS.
2666 */
2667 uint8_t bUnmapInfoNewTss;
2668 void *pvNewTss;
2669 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2670 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2671 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2672 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2673 * not perform correct translation if this happens. See Intel spec. 7.2.1
2674 * "Task-State Segment". */
2675 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2676/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2677 * Consider wrapping the remainder into a function for simpler cleanup. */
2678 if (rcStrict != VINF_SUCCESS)
2679 {
2680 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2681 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2682 return rcStrict;
2683 }
2684
2685 /*
2686 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2687 */
2688 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2689 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2690 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2691 {
2692 uint8_t bUnmapInfoDescCurTss;
2693 PX86DESC pDescCurTss;
2694 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2695 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2696 if (rcStrict != VINF_SUCCESS)
2697 {
2698 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2699 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2700 return rcStrict;
2701 }
2702
2703 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2704 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2705 if (rcStrict != VINF_SUCCESS)
2706 {
2707 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2708 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2709 return rcStrict;
2710 }
2711
2712 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2713 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2714 {
2715 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2716 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2717 fEFlags &= ~X86_EFL_NT;
2718 }
2719 }
2720
2721 /*
2722 * Save the CPU state into the current TSS.
2723 */
2724 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2725 if (GCPtrNewTss == GCPtrCurTss)
2726 {
2727 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2728 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2729 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2730 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2731 pVCpu->cpum.GstCtx.ldtr.Sel));
2732 }
2733 if (fIsNewTss386)
2734 {
2735 /*
2736 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2737 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2738 */
2739 uint8_t bUnmapInfoCurTss32;
2740 void *pvCurTss32;
2741 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2742 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2743 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2744 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2745 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2746 if (rcStrict != VINF_SUCCESS)
2747 {
2748 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2749 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2750 return rcStrict;
2751 }
2752
2753         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2754 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2755 pCurTss32->eip = uNextEip;
2756 pCurTss32->eflags = fEFlags;
2757 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2758 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2759 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2760 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2761 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2762 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2763 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2764 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2765 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2766 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2767 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2768 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2769 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2770 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2771
2772 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2773 if (rcStrict != VINF_SUCCESS)
2774 {
2775 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2776 VBOXSTRICTRC_VAL(rcStrict)));
2777 return rcStrict;
2778 }
2779 }
2780 else
2781 {
2782 /*
2783 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2784 */
2785 uint8_t bUnmapInfoCurTss16;
2786 void *pvCurTss16;
2787 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2788 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2789 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2790 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2791 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2792 if (rcStrict != VINF_SUCCESS)
2793 {
2794 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2795 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2796 return rcStrict;
2797 }
2798
2799         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2800 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2801 pCurTss16->ip = uNextEip;
2802 pCurTss16->flags = (uint16_t)fEFlags;
2803 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2804 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2805 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2806 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2807 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2808 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2809 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2810 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2811 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2812 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2813 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2814 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2815
2816 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2817 if (rcStrict != VINF_SUCCESS)
2818 {
2819 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2820 VBOXSTRICTRC_VAL(rcStrict)));
2821 return rcStrict;
2822 }
2823 }
2824
2825 /*
2826 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2827 */
2828 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2829 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2830 {
2831 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2832 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2833 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2834 }
2835
2836 /*
2837     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2838 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2839 */
2840 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2841 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2842 bool fNewDebugTrap;
2843 if (fIsNewTss386)
2844 {
2845 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2846 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2847 uNewEip = pNewTss32->eip;
2848 uNewEflags = pNewTss32->eflags;
2849 uNewEax = pNewTss32->eax;
2850 uNewEcx = pNewTss32->ecx;
2851 uNewEdx = pNewTss32->edx;
2852 uNewEbx = pNewTss32->ebx;
2853 uNewEsp = pNewTss32->esp;
2854 uNewEbp = pNewTss32->ebp;
2855 uNewEsi = pNewTss32->esi;
2856 uNewEdi = pNewTss32->edi;
2857 uNewES = pNewTss32->es;
2858 uNewCS = pNewTss32->cs;
2859 uNewSS = pNewTss32->ss;
2860 uNewDS = pNewTss32->ds;
2861 uNewFS = pNewTss32->fs;
2862 uNewGS = pNewTss32->gs;
2863 uNewLdt = pNewTss32->selLdt;
2864 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2865 }
2866 else
2867 {
2868 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2869 uNewCr3 = 0;
2870 uNewEip = pNewTss16->ip;
2871 uNewEflags = pNewTss16->flags;
2872 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2873 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2874 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2875 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2876 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2877 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2878 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2879 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2880 uNewES = pNewTss16->es;
2881 uNewCS = pNewTss16->cs;
2882 uNewSS = pNewTss16->ss;
2883 uNewDS = pNewTss16->ds;
2884 uNewFS = 0;
2885 uNewGS = 0;
2886 uNewLdt = pNewTss16->selLdt;
2887 fNewDebugTrap = false;
2888 }
2889
2890 if (GCPtrNewTss == GCPtrCurTss)
2891 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2892 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2893
2894 /*
2895 * We're done accessing the new TSS.
2896 */
2897 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2898 if (rcStrict != VINF_SUCCESS)
2899 {
2900 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2901 return rcStrict;
2902 }
2903
2904 /*
2905 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2906 */
2907 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2908 {
2909 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2910 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2911 if (rcStrict != VINF_SUCCESS)
2912 {
2913 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2914 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2915 return rcStrict;
2916 }
2917
2918 /* Check that the descriptor indicates the new TSS is available (not busy). */
2919 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2920 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2921 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2922
2923 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2924 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2925 if (rcStrict != VINF_SUCCESS)
2926 {
2927 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2928 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2929 return rcStrict;
2930 }
2931 }
2932
2933 /*
2934 * From this point on, we're technically in the new task. We will defer exceptions
2935 * until the completion of the task switch but before executing any instructions in the new task.
2936 */
2937 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2938 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2939 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2940 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2941 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2942 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2943 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2944
2945 /* Set the busy bit in TR. */
2946 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2947
2948 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2949 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2950 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2951 {
2952 uNewEflags |= X86_EFL_NT;
2953 }
2954
2955 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2956 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2957 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2958
2959 pVCpu->cpum.GstCtx.eip = uNewEip;
2960 pVCpu->cpum.GstCtx.eax = uNewEax;
2961 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2962 pVCpu->cpum.GstCtx.edx = uNewEdx;
2963 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2964 pVCpu->cpum.GstCtx.esp = uNewEsp;
2965 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2966 pVCpu->cpum.GstCtx.esi = uNewEsi;
2967 pVCpu->cpum.GstCtx.edi = uNewEdi;
2968
2969 uNewEflags &= X86_EFL_LIVE_MASK;
2970 uNewEflags |= X86_EFL_RA1_MASK;
2971 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2972
2973 /*
2974 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2975 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2976 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2977 */
2978 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2979 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2980
2981 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2982 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2983
2984 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2985 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2986
2987 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2988 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2989
2990 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2991 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2992
2993 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2994 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2995 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2996
2997 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2998 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2999 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3000 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3001
3002 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3003 {
3004 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3005 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3006 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3007 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3008 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3009 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3010 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3011 }
3012
3013 /*
3014 * Switch CR3 for the new task.
3015 */
3016 if ( fIsNewTss386
3017 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3018 {
3019 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3020 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3021 AssertRCSuccessReturn(rc, rc);
3022
3023 /* Inform PGM. */
3024 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3025 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3026 AssertRCReturn(rc, rc);
3027 /* ignore informational status codes */
3028
3029 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3030 }
3031
3032 /*
3033 * Switch LDTR for the new task.
3034 */
3035 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3036 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3037 else
3038 {
3039 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3040
3041 IEMSELDESC DescNewLdt;
3042 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3043 if (rcStrict != VINF_SUCCESS)
3044 {
3045 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3046 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3047 return rcStrict;
3048 }
3049 if ( !DescNewLdt.Legacy.Gen.u1Present
3050 || DescNewLdt.Legacy.Gen.u1DescType
3051 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3052 {
3053 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3054 uNewLdt, DescNewLdt.Legacy.u));
3055 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3056 }
3057
3058 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3059 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3060 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3061 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3062 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3063 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3064 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3066 }
3067
3068 IEMSELDESC DescSS;
3069 if (IEM_IS_V86_MODE(pVCpu))
3070 {
3071 IEM_SET_CPL(pVCpu, 3);
3072 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3073 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3074 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3075 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3076 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3077 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3078
3079 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3080 DescSS.Legacy.u = 0;
3081 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3082 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3083 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3084 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3085 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3086 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3087 DescSS.Legacy.Gen.u2Dpl = 3;
3088 }
3089 else
3090 {
3091 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3092
3093 /*
3094 * Load the stack segment for the new task.
3095 */
3096 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3097 {
3098 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3099 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3100 }
3101
3102 /* Fetch the descriptor. */
3103 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3104 if (rcStrict != VINF_SUCCESS)
3105 {
3106 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3107 VBOXSTRICTRC_VAL(rcStrict)));
3108 return rcStrict;
3109 }
3110
3111 /* SS must be a data segment and writable. */
3112 if ( !DescSS.Legacy.Gen.u1DescType
3113 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3114 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3115 {
3116 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3117 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3118 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3119 }
3120
3121 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3122 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3123 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3124 {
3125 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3126 uNewCpl));
3127 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3128 }
3129
3130 /* Is it there? */
3131 if (!DescSS.Legacy.Gen.u1Present)
3132 {
3133 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3134 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3135 }
3136
3137 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3138 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3139
3140 /* Set the accessed bit before committing the result into SS. */
3141 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3142 {
3143 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3144 if (rcStrict != VINF_SUCCESS)
3145 return rcStrict;
3146 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3147 }
3148
3149 /* Commit SS. */
3150 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3151 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3152 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3153 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3154 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3155 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3156 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3157
3158 /* CPL has changed, update IEM before loading rest of segments. */
3159 IEM_SET_CPL(pVCpu, uNewCpl);
3160
3161 /*
3162 * Load the data segments for the new task.
3163 */
3164 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3165 if (rcStrict != VINF_SUCCESS)
3166 return rcStrict;
3167 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3168 if (rcStrict != VINF_SUCCESS)
3169 return rcStrict;
3170 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3171 if (rcStrict != VINF_SUCCESS)
3172 return rcStrict;
3173 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3174 if (rcStrict != VINF_SUCCESS)
3175 return rcStrict;
3176
3177 /*
3178 * Load the code segment for the new task.
3179 */
3180 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3181 {
3182 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3184 }
3185
3186 /* Fetch the descriptor. */
3187 IEMSELDESC DescCS;
3188 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3189 if (rcStrict != VINF_SUCCESS)
3190 {
3191 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3192 return rcStrict;
3193 }
3194
3195 /* CS must be a code segment. */
3196 if ( !DescCS.Legacy.Gen.u1DescType
3197 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3198 {
3199 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3200 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3201 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3202 }
3203
3204 /* For conforming CS, DPL must be less than or equal to the RPL. */
3205 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3206 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3207 {
3208 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3209 DescCS.Legacy.Gen.u2Dpl));
3210 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3211 }
3212
3213 /* For non-conforming CS, DPL must match RPL. */
3214 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3215 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3216 {
3217 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3218 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3219 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3220 }
3221
3222 /* Is it there? */
3223 if (!DescCS.Legacy.Gen.u1Present)
3224 {
3225 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3226 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3227 }
3228
3229 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3230 u64Base = X86DESC_BASE(&DescCS.Legacy);
3231
3232 /* Set the accessed bit before committing the result into CS. */
3233 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3234 {
3235 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3236 if (rcStrict != VINF_SUCCESS)
3237 return rcStrict;
3238 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3239 }
3240
3241 /* Commit CS. */
3242 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3243 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3244 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3245 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3246 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3247 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3249 }
3250
3251 /* Make sure the CPU mode is correct. */
3252 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3253 if (fExecNew != pVCpu->iem.s.fExec)
3254 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3255 pVCpu->iem.s.fExec = fExecNew;
3256
3257 /** @todo Debug trap. */
3258 if (fIsNewTss386 && fNewDebugTrap)
3259 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3260
3261 /*
3262 * Construct the error code masks based on what caused this task switch.
3263 * See Intel Instruction reference for INT.
3264 */
3265 uint16_t uExt;
3266 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3267 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3268 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3269 uExt = 1;
3270 else
3271 uExt = 0;
3272
3273 /*
3274 * Push any error code on to the new stack.
3275 */
3276 if (fFlags & IEM_XCPT_FLAGS_ERR)
3277 {
3278 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3279 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3280 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3281
3282 /* Check that there is sufficient space on the stack. */
3283 /** @todo Factor out segment limit checking for normal/expand down segments
3284 * into a separate function. */
3285 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3286 {
3287 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3288 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3289 {
3290 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3291 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3292 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3293 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3294 }
3295 }
3296 else
3297 {
3298 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3299 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3300 {
3301 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3302 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3303 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3304 }
3305 }
3306
3307
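    /* Push the error code with the width implied by the TSS type: 32-bit for a 386 TSS, 16-bit for a 286 TSS. */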
3308 if (fIsNewTss386)
3309 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3310 else
3311 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3312 if (rcStrict != VINF_SUCCESS)
3313 {
3314 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3315 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3316 return rcStrict;
3317 }
3318 }
3319
3320 /* Check the new EIP against the new CS limit. */
3321 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3322 {
3323 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3324 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3325 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3326 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3327 }
3328
3329 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3330 pVCpu->cpum.GstCtx.ss.Sel));
3331 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3332}
3333
3334
3335/**
3336 * Implements exceptions and interrupts for protected mode.
3337 *
3338 * @returns VBox strict status code.
3339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3340 * @param cbInstr The number of bytes to offset rIP by in the return
3341 * address.
3342 * @param u8Vector The interrupt / exception vector number.
3343 * @param fFlags The flags.
3344 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3345 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3346 */
3347static VBOXSTRICTRC
3348iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3349 uint8_t cbInstr,
3350 uint8_t u8Vector,
3351 uint32_t fFlags,
3352 uint16_t uErr,
3353 uint64_t uCr2) RT_NOEXCEPT
3354{
3355 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3356
3357 /*
3358 * Read the IDT entry.
3359 */
3360 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3361 {
3362 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3363 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3364 }
3365 X86DESC Idte;
3366 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3367 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3368 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3369 {
3370 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3371 return rcStrict;
3372 }
3373 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3374 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3375 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3376 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3377
3378 /*
3379 * Check the descriptor type, DPL and such.
3380 * ASSUMES this is done in the same order as described for call-gate calls.
3381 */
3382 if (Idte.Gate.u1DescType)
3383 {
3384 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3385 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3386 }
3387 bool fTaskGate = false;
3388 uint8_t f32BitGate = true;
3389 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
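    /* Classify the gate: task gates dispatch a task switch, interrupt gates additionally clear IF, trap gates
       leave IF alone, and 286 gate types imply 16-bit pushes. All other types take the #GP path below. */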
3390 switch (Idte.Gate.u4Type)
3391 {
3392 case X86_SEL_TYPE_SYS_UNDEFINED:
3393 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3394 case X86_SEL_TYPE_SYS_LDT:
3395 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3396 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3397 case X86_SEL_TYPE_SYS_UNDEFINED2:
3398 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3399 case X86_SEL_TYPE_SYS_UNDEFINED3:
3400 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3401 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3402 case X86_SEL_TYPE_SYS_UNDEFINED4:
3403 {
3404 /** @todo check what actually happens when the type is wrong...
3405 * esp. call gates. */
3406 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3407 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3408 }
3409
3410 case X86_SEL_TYPE_SYS_286_INT_GATE:
3411 f32BitGate = false;
3412 RT_FALL_THRU();
3413 case X86_SEL_TYPE_SYS_386_INT_GATE:
3414 fEflToClear |= X86_EFL_IF;
3415 break;
3416
3417 case X86_SEL_TYPE_SYS_TASK_GATE:
3418 fTaskGate = true;
3419#ifndef IEM_IMPLEMENTS_TASKSWITCH
3420 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3421#endif
3422 break;
3423
3424 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3425 f32BitGate = false;
3426 break;
3427 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3428 break;
3429
3430 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3431 }
3432
3433 /* Check DPL against CPL if applicable. */
3434 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3435 {
3436 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3437 {
3438 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3439 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3440 }
3441 }
3442
3443 /* Is it there? */
3444 if (!Idte.Gate.u1Present)
3445 {
3446 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3447 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3448 }
3449
3450 /* Is it a task-gate? */
3451 if (fTaskGate)
3452 {
3453 /*
3454 * Construct the error code masks based on what caused this task switch.
3455 * See Intel Instruction reference for INT.
3456 */
3457 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3458 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3459 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3460 RTSEL SelTss = Idte.Gate.u16Sel;
3461
3462 /*
3463 * Fetch the TSS descriptor in the GDT.
3464 */
3465 IEMSELDESC DescTSS;
3466 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3467 if (rcStrict != VINF_SUCCESS)
3468 {
3469 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3470 VBOXSTRICTRC_VAL(rcStrict)));
3471 return rcStrict;
3472 }
3473
3474 /* The TSS descriptor must be a system segment and be available (not busy). */
3475 if ( DescTSS.Legacy.Gen.u1DescType
3476 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3477 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3478 {
3479 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3480 u8Vector, SelTss, DescTSS.Legacy.au64));
3481 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3482 }
3483
3484 /* The TSS must be present. */
3485 if (!DescTSS.Legacy.Gen.u1Present)
3486 {
3487 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3488 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3489 }
3490
3491 /* Do the actual task switch. */
3492 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3493 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3494 fFlags, uErr, uCr2, SelTss, &DescTSS);
3495 }
3496
3497 /* A null CS is bad. */
3498 RTSEL NewCS = Idte.Gate.u16Sel;
3499 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3500 {
3501 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3502 return iemRaiseGeneralProtectionFault0(pVCpu);
3503 }
3504
3505 /* Fetch the descriptor for the new CS. */
3506 IEMSELDESC DescCS;
3507 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3508 if (rcStrict != VINF_SUCCESS)
3509 {
3510 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3511 return rcStrict;
3512 }
3513
3514 /* Must be a code segment. */
3515 if (!DescCS.Legacy.Gen.u1DescType)
3516 {
3517 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3518 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3519 }
3520 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3521 {
3522 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3523 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3524 }
3525
3526 /* Don't allow lowering the privilege level. */
3527 /** @todo Does the lowering of privileges apply to software interrupts
3528 * only? This has bearings on the more-privileged or
3529 * same-privilege stack behavior further down. A testcase would
3530 * be nice. */
3531 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3532 {
3533 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3534 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3535 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3536 }
3537
3538 /* Make sure the selector is present. */
3539 if (!DescCS.Legacy.Gen.u1Present)
3540 {
3541 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3542 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3543 }
3544
3545#ifdef LOG_ENABLED
3546 /* If software interrupt, try decode it if logging is enabled and such. */
3547 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3548 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3549 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3550#endif
3551
3552 /* Check the new EIP against the new CS limit. */
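    /* (286 gates only carry a 16-bit offset; 386 gates carry the full 32-bit offset.) */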
3553 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3554 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3555 ? Idte.Gate.u16OffsetLow
3556 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3557 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3558 if (uNewEip > cbLimitCS)
3559 {
3560 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3561 u8Vector, uNewEip, cbLimitCS, NewCS));
3562 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3563 }
3564 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3565
3566 /* Calc the flag image to push. */
3567 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3568 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3569 fEfl &= ~X86_EFL_RF;
3570 else
3571 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3572
3573 /* From V8086 mode only go to CPL 0. */
3574 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3575 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
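    /* (A conforming code segment keeps the current CPL; otherwise the handler runs at the descriptor's DPL.) */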
3576 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3577 {
3578 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3579 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3580 }
3581
3582 /*
3583 * If the privilege level changes, we need to get a new stack from the TSS.
3584 * This in turn means validating the new SS and ESP...
3585 */
3586 if (uNewCpl != IEM_GET_CPL(pVCpu))
3587 {
3588 RTSEL NewSS;
3589 uint32_t uNewEsp;
3590 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3591 if (rcStrict != VINF_SUCCESS)
3592 return rcStrict;
3593
3594 IEMSELDESC DescSS;
3595 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3596 if (rcStrict != VINF_SUCCESS)
3597 return rcStrict;
3598 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3599 if (!DescSS.Legacy.Gen.u1DefBig)
3600 {
3601 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3602 uNewEsp = (uint16_t)uNewEsp;
3603 }
3604
3605 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3606
3607 /* Check that there is sufficient space for the stack frame. */
3608 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3609 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3610 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3611 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
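    /* (10/12 bytes cover EIP, CS, EFLAGS, ESP, SS and an optional error code; interrupting V86 code adds
       ES, DS, FS and GS; 32-bit gates double the size.) */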
3612
3613 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3614 {
3615 if ( uNewEsp - 1 > cbLimitSS
3616 || uNewEsp < cbStackFrame)
3617 {
3618 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3619 u8Vector, NewSS, uNewEsp, cbStackFrame));
3620 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3621 }
3622 }
3623 else
3624 {
3625 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3626 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3627 {
3628 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3629 u8Vector, NewSS, uNewEsp, cbStackFrame));
3630 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3631 }
3632 }
3633
3634 /*
3635 * Start making changes.
3636 */
3637
3638 /* Set the new CPL so that stack accesses use it. */
3639 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3640 IEM_SET_CPL(pVCpu, uNewCpl);
3641
3642 /* Create the stack frame. */
3643 uint8_t bUnmapInfoStackFrame;
3644 RTPTRUNION uStackFrame;
3645 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3646 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3647 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3648 if (rcStrict != VINF_SUCCESS)
3649 return rcStrict;
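    /* Write the frame from the lowest address upwards: the optional error code first, then EIP, CS, EFLAGS,
       ESP and SS, plus ES/DS/FS/GS when interrupting V86 code. */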
3650 if (f32BitGate)
3651 {
3652 if (fFlags & IEM_XCPT_FLAGS_ERR)
3653 *uStackFrame.pu32++ = uErr;
3654 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3655 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3656 uStackFrame.pu32[2] = fEfl;
3657 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3658 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3659 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3660 if (fEfl & X86_EFL_VM)
3661 {
3662 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3663 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3664 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3665 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3666 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3667 }
3668 }
3669 else
3670 {
3671 if (fFlags & IEM_XCPT_FLAGS_ERR)
3672 *uStackFrame.pu16++ = uErr;
3673 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3674 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3675 uStackFrame.pu16[2] = fEfl;
3676 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3677 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3678 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3679 if (fEfl & X86_EFL_VM)
3680 {
3681 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3682 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3683 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3684 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3685 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3686 }
3687 }
3688 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3689 if (rcStrict != VINF_SUCCESS)
3690 return rcStrict;
3691
3692 /* Mark the selectors 'accessed' (hope this is the correct time). */
3693 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3694 * after pushing the stack frame? (Write protect the gdt + stack to
3695 * find out.) */
3696 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3697 {
3698 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3699 if (rcStrict != VINF_SUCCESS)
3700 return rcStrict;
3701 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3702 }
3703
3704 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3705 {
3706 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3707 if (rcStrict != VINF_SUCCESS)
3708 return rcStrict;
3709 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3710 }
3711
3712 /*
3713 * Start committing the register changes (joins with the DPL=CPL branch).
3714 */
3715 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3716 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3717 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3718 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3719 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3720 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3721 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3722 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3723 * SP is loaded).
3724 * Need to check the other combinations too:
3725 * - 16-bit TSS, 32-bit handler
3726 * - 32-bit TSS, 16-bit handler */
3727 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3728 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3729 else
3730 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3731
3732 if (fEfl & X86_EFL_VM)
3733 {
3734 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3735 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3736 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3737 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3738 }
3739 }
3740 /*
3741 * Same privilege, no stack change and smaller stack frame.
3742 */
3743 else
3744 {
3745 uint64_t uNewRsp;
3746 uint8_t bUnmapInfoStackFrame;
3747 RTPTRUNION uStackFrame;
3748 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
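    /* (6/8 bytes for a 16-bit gate: IP, CS, FLAGS and an optional error code; doubled for a 32-bit gate.) */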
3749 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3750 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3751 if (rcStrict != VINF_SUCCESS)
3752 return rcStrict;
3753
3754 if (f32BitGate)
3755 {
3756 if (fFlags & IEM_XCPT_FLAGS_ERR)
3757 *uStackFrame.pu32++ = uErr;
3758 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3759 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3760 uStackFrame.pu32[2] = fEfl;
3761 }
3762 else
3763 {
3764 if (fFlags & IEM_XCPT_FLAGS_ERR)
3765 *uStackFrame.pu16++ = uErr;
3766 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3767 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3768 uStackFrame.pu16[2] = fEfl;
3769 }
3770 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3771 if (rcStrict != VINF_SUCCESS)
3772 return rcStrict;
3773
3774 /* Mark the CS selector as 'accessed'. */
3775 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3776 {
3777 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3778 if (rcStrict != VINF_SUCCESS)
3779 return rcStrict;
3780 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3781 }
3782
3783 /*
3784 * Start committing the register changes (joins with the other branch).
3785 */
3786 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3787 }
3788
3789 /* ... register committing continues. */
3790 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3791 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3792 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3793 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3794 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3795 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3796
3797 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3798 fEfl &= ~fEflToClear;
3799 IEMMISC_SET_EFL(pVCpu, fEfl);
3800
3801 if (fFlags & IEM_XCPT_FLAGS_CR2)
3802 pVCpu->cpum.GstCtx.cr2 = uCr2;
3803
3804 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3805 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3806
3807 /* Make sure the execution flags are correct. */
3808 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3809 if (fExecNew != pVCpu->iem.s.fExec)
3810 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3811 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3812 pVCpu->iem.s.fExec = fExecNew;
3813 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3814
3815 /*
3816 * Deal with debug events that follow the exception and clear inhibit flags.
3817 */
3818 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3819 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3820 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3821 else
3822 {
3823 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3824 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3825 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3826 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3827 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3828 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3829 return iemRaiseDebugException(pVCpu);
3830 }
3831
3832 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3833}
3834
3835
3836/**
3837 * Implements exceptions and interrupts for long mode.
3838 *
3839 * @returns VBox strict status code.
3840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3841 * @param cbInstr The number of bytes to offset rIP by in the return
3842 * address.
3843 * @param u8Vector The interrupt / exception vector number.
3844 * @param fFlags The flags.
3845 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3846 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3847 */
3848static VBOXSTRICTRC
3849iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3850 uint8_t cbInstr,
3851 uint8_t u8Vector,
3852 uint32_t fFlags,
3853 uint16_t uErr,
3854 uint64_t uCr2) RT_NOEXCEPT
3855{
3856 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3857
3858 /*
3859 * Read the IDT entry.
3860 */
3861 uint16_t offIdt = (uint16_t)u8Vector << 4;
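    /* Long-mode IDT entries are 16 bytes each, hence the shift by 4. */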
3862 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3863 {
3864 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3865 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3866 }
3867 X86DESC64 Idte;
3868#ifdef _MSC_VER /* Shut up silly compiler warning. */
3869 Idte.au64[0] = 0;
3870 Idte.au64[1] = 0;
3871#endif
3872 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3873 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3874 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3875 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3876 {
3877 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3878 return rcStrict;
3879 }
3880 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3881 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3882 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3883
3884 /*
3885 * Check the descriptor type, DPL and such.
3886 * ASSUMES this is done in the same order as described for call-gate calls.
3887 */
3888 if (Idte.Gate.u1DescType)
3889 {
3890 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3891 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3892 }
3893 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3894 switch (Idte.Gate.u4Type)
3895 {
3896 case AMD64_SEL_TYPE_SYS_INT_GATE:
3897 fEflToClear |= X86_EFL_IF;
3898 break;
3899 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3900 break;
3901
3902 default:
3903 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3904 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3905 }
3906
3907 /* Check DPL against CPL if applicable. */
3908 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3909 {
3910 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3911 {
3912 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3913 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3914 }
3915 }
3916
3917 /* Is it there? */
3918 if (!Idte.Gate.u1Present)
3919 {
3920 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3921 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3922 }
3923
3924 /* A null CS is bad. */
3925 RTSEL NewCS = Idte.Gate.u16Sel;
3926 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3927 {
3928 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3929 return iemRaiseGeneralProtectionFault0(pVCpu);
3930 }
3931
3932 /* Fetch the descriptor for the new CS. */
3933 IEMSELDESC DescCS;
3934 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3935 if (rcStrict != VINF_SUCCESS)
3936 {
3937 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3938 return rcStrict;
3939 }
3940
3941 /* Must be a 64-bit code segment. */
3942 if (!DescCS.Long.Gen.u1DescType)
3943 {
3944 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3945 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3946 }
3947 if ( !DescCS.Long.Gen.u1Long
3948 || DescCS.Long.Gen.u1DefBig
3949 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3950 {
3951 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3952 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3953 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3954 }
3955
3956 /* Don't allow lowering the privilege level. For non-conforming CS
3957 selectors, the CS.DPL sets the privilege level the trap/interrupt
3958 handler runs at. For conforming CS selectors, the CPL remains
3959 unchanged, but the CS.DPL must be <= CPL. */
3960 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3961 * when CPU in Ring-0. Result \#GP? */
3962 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3963 {
3964 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3965 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3966 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3967 }
3968
3969
3970 /* Make sure the selector is present. */
3971 if (!DescCS.Legacy.Gen.u1Present)
3972 {
3973 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3974 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3975 }
3976
3977 /* Check that the new RIP is canonical. */
3978 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3979 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3980 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3981 if (!IEM_IS_CANONICAL(uNewRip))
3982 {
3983 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3984 return iemRaiseGeneralProtectionFault0(pVCpu);
3985 }
3986
3987 /*
3988 * If the privilege level changes or if the IST isn't zero, we need to get
3989 * a new stack from the TSS.
3990 */
3991 uint64_t uNewRsp;
3992 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3993 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3994 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3995 || Idte.Gate.u3IST != 0)
3996 {
3997 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3998 if (rcStrict != VINF_SUCCESS)
3999 return rcStrict;
4000 }
4001 else
4002 uNewRsp = pVCpu->cpum.GstCtx.rsp;
4003 uNewRsp &= ~(uint64_t)0xf;
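    /* The stack pointer is aligned down to a 16-byte boundary before the frame is pushed (64-bit interrupt delivery). */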
4004
4005 /*
4006 * Calc the flag image to push.
4007 */
4008 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4009 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4010 fEfl &= ~X86_EFL_RF;
4011 else
4012 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4013
4014 /*
4015 * Start making changes.
4016 */
4017 /* Set the new CPL so that stack accesses use it. */
4018 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4019 IEM_SET_CPL(pVCpu, uNewCpl);
4020/** @todo Setting CPL this early seems wrong as it would affect any errors we
4021 * raise accessing the stack and (?) GDT/LDT... */
4022
4023 /* Create the stack frame. */
4024 uint8_t bUnmapInfoStackFrame;
4025 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
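    /* Five qwords (RIP, CS, RFLAGS, RSP, SS), plus one more when an error code is pushed. */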
4026 RTPTRUNION uStackFrame;
4027 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4028 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4029 if (rcStrict != VINF_SUCCESS)
4030 return rcStrict;
4031
4032 if (fFlags & IEM_XCPT_FLAGS_ERR)
4033 *uStackFrame.pu64++ = uErr;
4034 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4035 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4036 uStackFrame.pu64[2] = fEfl;
4037 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4038 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4039 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4040 if (rcStrict != VINF_SUCCESS)
4041 return rcStrict;
4042
4043 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4044 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4045 * after pushing the stack frame? (Write protect the gdt + stack to
4046 * find out.) */
4047 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4048 {
4049 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4050 if (rcStrict != VINF_SUCCESS)
4051 return rcStrict;
4052 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4053 }
4054
4055 /*
4056 * Start committing the register changes.
4057 */
4058 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4059 * hidden registers when interrupting 32-bit or 16-bit code! */
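    /* A privilege change in long mode loads SS with a null selector whose RPL is the new CPL. */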
4060 if (uNewCpl != uOldCpl)
4061 {
4062 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4063 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4064 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4065 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4066 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4067 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4068 }
4069 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4070 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4071 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4072 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4073 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4074 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4075 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4076 pVCpu->cpum.GstCtx.rip = uNewRip;
4077
4078 fEfl &= ~fEflToClear;
4079 IEMMISC_SET_EFL(pVCpu, fEfl);
4080
4081 if (fFlags & IEM_XCPT_FLAGS_CR2)
4082 pVCpu->cpum.GstCtx.cr2 = uCr2;
4083
4084 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4085 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4086
4087 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4088
4089 /*
4090 * Deal with debug events that follow the exception and clear inhibit flags.
4091 */
4092 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4093 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4094 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4095 else
4096 {
4097 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4098 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4099 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4100 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4101 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4102 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4103 return iemRaiseDebugException(pVCpu);
4104 }
4105
4106 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4107}
4108
4109
4110/**
4111 * Implements exceptions and interrupts.
4112 *
4113 * All exceptions and interrupts go through this function!
4114 *
4115 * @returns VBox strict status code.
4116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4117 * @param cbInstr The number of bytes to offset rIP by in the return
4118 * address.
4119 * @param u8Vector The interrupt / exception vector number.
4120 * @param fFlags The flags.
4121 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4122 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4123 */
4124VBOXSTRICTRC
4125iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4126 uint8_t cbInstr,
4127 uint8_t u8Vector,
4128 uint32_t fFlags,
4129 uint16_t uErr,
4130 uint64_t uCr2) RT_NOEXCEPT
4131{
4132 /*
4133 * Get all the state that we might need here.
4134 */
4135 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4136 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4137
4138#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4139 /*
4140 * Flush prefetch buffer
4141 */
4142 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4143#endif
4144
4145 /*
4146 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4147 */
4148 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4149 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4150 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4151 | IEM_XCPT_FLAGS_BP_INSTR
4152 | IEM_XCPT_FLAGS_ICEBP_INSTR
4153 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4154 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4155 {
4156 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4157 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4158 u8Vector = X86_XCPT_GP;
4159 uErr = 0;
4160 }
4161
4162 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4163#ifdef DBGFTRACE_ENABLED
4164 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4165 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4166 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4167#endif
4168
4169 /*
4170 * Check if DBGF wants to intercept the exception.
4171 */
4172 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4173 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4174 { /* likely */ }
4175 else
4176 {
4177 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4178 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4179 if (rcStrict != VINF_SUCCESS)
4180 return rcStrict;
4181 }
4182
4183 /*
4184 * Evaluate whether NMI blocking should be in effect.
4185 * Normally, NMI blocking is in effect whenever we inject an NMI.
4186 */
4187 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4188 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4189
4190#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4191 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4192 {
4193 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4194 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4195 return rcStrict0;
4196
4197 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4198 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4199 {
4200 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4201 fBlockNmi = false;
4202 }
4203 }
4204#endif
4205
4206#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4207 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4208 {
4209 /*
4210 * If the event is being injected as part of VMRUN, it isn't subject to event
4211 * intercepts in the nested-guest. However, secondary exceptions that occur
4212 * during injection of any event -are- subject to exception intercepts.
4213 *
4214 * See AMD spec. 15.20 "Event Injection".
4215 */
4216 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4217 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4218 else
4219 {
4220 /*
4221 * Check and handle if the event being raised is intercepted.
4222 */
4223 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4224 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4225 return rcStrict0;
4226 }
4227 }
4228#endif
4229
4230 /*
4231 * Set NMI blocking if necessary.
4232 */
4233 if (fBlockNmi)
4234 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4235
4236 /*
4237 * Do recursion accounting.
4238 */
4239 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4240 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4241 if (pVCpu->iem.s.cXcptRecursions == 0)
4242 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4243 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4244 else
4245 {
4246 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4247 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4248 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4249
4250 if (pVCpu->iem.s.cXcptRecursions >= 4)
4251 {
4252#ifdef DEBUG_bird
4253 AssertFailed();
4254#endif
4255 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4256 }
4257
4258 /*
4259 * Evaluate the sequence of recurring events.
4260 */
4261 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4262 NULL /* pXcptRaiseInfo */);
4263 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4264 { /* likely */ }
4265 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4266 {
4267 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4268 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4269 u8Vector = X86_XCPT_DF;
4270 uErr = 0;
4271#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4272 /* VMX nested-guest #DF intercept needs to be checked here. */
4273 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4274 {
4275 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4276 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4277 return rcStrict0;
4278 }
4279#endif
4280 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4281 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4282 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4283 }
4284 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4285 {
4286 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4287 return iemInitiateCpuShutdown(pVCpu);
4288 }
4289 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4290 {
4291 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4292 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4293 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4294 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4295 return VERR_EM_GUEST_CPU_HANG;
4296 }
4297 else
4298 {
4299 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4300 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4301 return VERR_IEM_IPE_9;
4302 }
4303
4304 /*
4305 * The 'EXT' bit is set when an exception occurs during delivery of an external
4306 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4307 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4308 * interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
4309 *
4310 * [1] - Intel spec. 6.13 "Error Code"
4311 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4312 * [3] - Intel Instruction reference for INT n.
4313 */
4314 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4315 && (fFlags & IEM_XCPT_FLAGS_ERR)
4316 && u8Vector != X86_XCPT_PF
4317 && u8Vector != X86_XCPT_DF)
4318 {
4319 uErr |= X86_TRAP_ERR_EXTERNAL;
4320 }
4321 }
4322
4323 pVCpu->iem.s.cXcptRecursions++;
4324 pVCpu->iem.s.uCurXcpt = u8Vector;
4325 pVCpu->iem.s.fCurXcpt = fFlags;
4326 pVCpu->iem.s.uCurXcptErr = uErr;
4327 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4328
4329 /*
4330 * Extensive logging.
4331 */
4332#if defined(LOG_ENABLED) && defined(IN_RING3)
4333 if (LogIs3Enabled())
4334 {
4335 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4336 char szRegs[4096];
4337 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4338 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4339 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4340 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4341 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4342 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4343 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4344 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4345 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4346 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4347 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4348 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4349 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4350 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4351 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4352 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4353 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4354 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4355 " efer=%016VR{efer}\n"
4356 " pat=%016VR{pat}\n"
4357 " sf_mask=%016VR{sf_mask}\n"
4358 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4359 " lstar=%016VR{lstar}\n"
4360 " star=%016VR{star} cstar=%016VR{cstar}\n"
4361 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4362 );
4363
4364 char szInstr[256];
4365 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4366 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4367 szInstr, sizeof(szInstr), NULL);
4368 Log3(("%s%s\n", szRegs, szInstr));
4369 }
4370#endif /* LOG_ENABLED */
4371
4372 /*
4373 * Stats.
4374 */
4375 uint64_t const uTimestamp = ASMReadTSC();
4376 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4377 {
4378 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4379 EMHistoryAddExit(pVCpu,
4380 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4381 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4382 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4383 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4384 }
4385 else
4386 {
4387 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4388 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4389 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4390 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4391 if (fFlags & IEM_XCPT_FLAGS_ERR)
4392 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4393 if (fFlags & IEM_XCPT_FLAGS_CR2)
4394 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4395 }
4396
4397 /*
4398 * Hack alert! Convert incoming debug events to silent ones on Intel.
4399 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4400 */
4401 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4402 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4403 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4404 { /* ignore */ }
4405 else
4406 {
4407 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4408 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4409 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4410 | CPUMCTX_DBG_HIT_DRX_SILENT;
4411 }
4412
4413 /*
4414 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4415 * to ensure that a stale TLB or paging cache entry will only cause one
4416 * spurious #PF.
4417 */
4418 if ( u8Vector == X86_XCPT_PF
4419 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4420 IEMTlbInvalidatePage(pVCpu, uCr2);
4421
4422 /*
4423 * Call the mode specific worker function.
4424 */
4425 VBOXSTRICTRC rcStrict;
4426 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4427 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4428 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4429 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4430 else
4431 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4432
4433 /* Flush the prefetch buffer. */
4434 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4435
4436 /*
4437 * Unwind.
4438 */
4439 pVCpu->iem.s.cXcptRecursions--;
4440 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4441 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4442 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4443 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4444 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4445 return rcStrict;
4446}
4447
4448#ifdef IEM_WITH_SETJMP
4449/**
4450 * See iemRaiseXcptOrInt. Will not return.
4451 */
4452DECL_NO_RETURN(void)
4453iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4454 uint8_t cbInstr,
4455 uint8_t u8Vector,
4456 uint32_t fFlags,
4457 uint16_t uErr,
4458 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4459{
4460 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4461 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4462}
4463#endif
4464
4465
4466/** \#DE - 00. */
4467VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4468{
4469 if (GCMIsInterceptingXcptDE(pVCpu))
4470 {
4471 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4472 if (rc == VINF_SUCCESS)
4473 {
4474 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4475 return VINF_IEM_RAISED_XCPT; /* must return a non-zero status here to cause an instruction restart */
4476 }
4477 }
4478 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4479}
4480
4481
4482#ifdef IEM_WITH_SETJMP
4483/** \#DE - 00. */
4484DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4485{
4486 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4487}
4488#endif
4489
4490
4491/** \#DB - 01.
4492 * @note This automatically clears DR7.GD. */
4493VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4494{
4495 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4496 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4497 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4498}
4499
4500
4501/** \#BR - 05. */
4502VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4503{
4504 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4505}
4506
4507
4508/** \#UD - 06. */
4509VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4510{
4511 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4512}
4513
4514
4515#ifdef IEM_WITH_SETJMP
4516/** \#UD - 06. */
4517DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4518{
4519 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4520}
4521#endif
4522
4523
4524/** \#NM - 07. */
4525VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4526{
4527 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4528}
4529
4530
4531#ifdef IEM_WITH_SETJMP
4532/** \#NM - 07. */
4533DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4534{
4535 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4536}
4537#endif
4538
4539
4540/** \#TS(err) - 0a. */
4541VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4542{
4543 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4544}
4545
4546
4547/** \#TS(tr) - 0a. */
4548VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4549{
4550 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4551 pVCpu->cpum.GstCtx.tr.Sel, 0);
4552}
4553
4554
4555/** \#TS(0) - 0a. */
4556VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4557{
4558 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4559 0, 0);
4560}
4561
4562
4563/** \#TS(sel) - 0a. */
4564VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4565{
4566 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4567 uSel & X86_SEL_MASK_OFF_RPL, 0);
4568}
4569
4570
4571/** \#NP(err) - 0b. */
4572VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4573{
4574 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4575}
4576
4577
4578/** \#NP(sel) - 0b. */
4579VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4580{
4581 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4582 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4583 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4584 uSel & ~X86_SEL_RPL, 0);
4585}
4586
4587
4588/** \#SS(seg) - 0c. */
4589VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4590{
4591 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4592 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4593 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4594 uSel & ~X86_SEL_RPL, 0);
4595}
4596
4597
4598/** \#SS(err) - 0c. */
4599VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4600{
4601 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4602 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4603 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4604}
4605
4606
4607/** \#GP(n) - 0d. */
4608VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4609{
4610 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4611 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4612}
4613
4614
4615/** \#GP(0) - 0d. */
4616VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4617{
4618 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4619 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4620}
4621
4622#ifdef IEM_WITH_SETJMP
4623/** \#GP(0) - 0d. */
4624DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4625{
4626 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4627 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4628}
4629#endif
4630
4631
4632/** \#GP(sel) - 0d. */
4633VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4634{
4635 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4636 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4637 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4638 Sel & ~X86_SEL_RPL, 0);
4639}
4640
4641
4642/** \#GP(0) - 0d. */
4643VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4644{
4645 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4646 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4647}
4648
4649
4650/** \#GP(sel) - 0d. */
4651VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4652{
4653 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4654 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4655 NOREF(iSegReg); NOREF(fAccess);
4656 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4657 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4658}
4659
4660#ifdef IEM_WITH_SETJMP
4661/** \#GP(sel) - 0d, longjmp. */
4662DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4663{
4664 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4665 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4666 NOREF(iSegReg); NOREF(fAccess);
4667 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4668 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4669}
4670#endif
4671
4672/** \#GP(sel) - 0d. */
4673VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4674{
4675 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4676 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4677 NOREF(Sel);
4678 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4679}
4680
4681#ifdef IEM_WITH_SETJMP
4682/** \#GP(sel) - 0d, longjmp. */
4683DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4684{
4685 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4686 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4687 NOREF(Sel);
4688 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4689}
4690#endif
4691
4692
4693/** \#GP(sel) - 0d. */
4694VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4695{
4696 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4697 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4698 NOREF(iSegReg); NOREF(fAccess);
4699 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4700}
4701
4702#ifdef IEM_WITH_SETJMP
4703/** \#GP(sel) - 0d, longjmp. */
4704DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4705{
4706 NOREF(iSegReg); NOREF(fAccess);
4707 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4708}
4709#endif
4710
4711
4712/** \#PF(n) - 0e. */
4713VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4714{
4715 uint16_t uErr;
4716 switch (rc)
4717 {
4718 case VERR_PAGE_NOT_PRESENT:
4719 case VERR_PAGE_TABLE_NOT_PRESENT:
4720 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4721 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4722 uErr = 0;
4723 break;
4724
4725 case VERR_RESERVED_PAGE_TABLE_BITS:
4726 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4727 break;
4728
4729 default:
4730 AssertMsgFailed(("%Rrc\n", rc));
4731 RT_FALL_THRU();
4732 case VERR_ACCESS_DENIED:
4733 uErr = X86_TRAP_PF_P;
4734 break;
4735 }
4736
4737 if (IEM_GET_CPL(pVCpu) == 3)
4738 uErr |= X86_TRAP_PF_US;
4739
4740 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4741 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4742 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4743 uErr |= X86_TRAP_PF_ID;
4744
4745#if 0 /* This is so much non-sense, really. Why was it done like that? */
4746 /* Note! RW access callers reporting a WRITE protection fault, will clear
4747 the READ flag before calling. So, read-modify-write accesses (RW)
4748 can safely be reported as READ faults. */
4749 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4750 uErr |= X86_TRAP_PF_RW;
4751#else
4752 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4753 {
4754 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4755 /// (regardless of outcome of the comparison in the latter case).
4756 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4757 uErr |= X86_TRAP_PF_RW;
4758 }
4759#endif
4760
4761 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4762 of the memory operand rather than at the start of it. (Not sure what
4763 happens if it crosses a page boundary.) The current heuristic for
4764 this is to report the #PF for the last byte if the access is more than
4765 64 bytes. This is probably not correct, but we can work that out later;
4766 the main objective now is to get FXSAVE to work like on real hardware and
4767 make bs3-cpu-basic2 work. */
4768 if (cbAccess <= 64)
4769 { /* likely*/ }
4770 else
4771 GCPtrWhere += cbAccess - 1;
4772
4773 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4774 uErr, GCPtrWhere);
4775}
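
/* A few made-up reference cases for the error code composition above; the values
   follow directly from the X86_TRAP_PF_* bit definitions, the scenarios are only
   illustrative:
     - ring-3 write to a present read-only page: VERR_ACCESS_DENIED gives
       X86_TRAP_PF_P, CPL==3 adds X86_TRAP_PF_US, the write adds X86_TRAP_PF_RW,
       so uErr = 0x7;
     - ring-0 code fetch from a not-present page with PAE+NXE enabled:
       VERR_PAGE_NOT_PRESENT gives 0 and the fetch adds X86_TRAP_PF_ID, so uErr = 0x10;
     - FXSAVE of 512 bytes at 0x1000 triggers the last-byte heuristic and reports
       CR2 = 0x11ff. */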
4776
4777#ifdef IEM_WITH_SETJMP
4778/** \#PF(n) - 0e, longjmp. */
4779DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4780 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4781{
4782 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4783}
4784#endif
4785
4786
4787/** \#MF(0) - 10. */
4788VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4789{
4790 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4792
4793 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4794 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4795 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4796}
4797
4798#ifdef IEM_WITH_SETJMP
4799/** \#MF(0) - 10, longjmp. */
4800DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4801{
4802 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4803}
4804#endif
4805
4806
4807/** \#AC(0) - 11. */
4808VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4809{
4810 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4811}
4812
4813#ifdef IEM_WITH_SETJMP
4814/** \#AC(0) - 11, longjmp. */
4815DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4816{
4817 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4818}
4819#endif
4820
4821
4822/** \#XF(0)/\#XM(0) - 19. */
4823VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4824{
4825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4826}
4827
4828
4829#ifdef IEM_WITH_SETJMP
4830/** \#XF(0)/\#XM(0) - 19, longjmp. */
4831DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4832{
4833 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4834}
4835#endif
4836
4837
4838/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4839IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4840{
4841 NOREF(cbInstr);
4842 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4843}
4844
4845
4846/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4847IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4848{
4849 NOREF(cbInstr);
4850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4851}
4852
4853
4854/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4855IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4856{
4857 NOREF(cbInstr);
4858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4859}
4860
4861
4862/** @} */
4863
4864/** @name Common opcode decoders.
4865 * @{
4866 */
4867//#include <iprt/mem.h>
4868
4869/**
4870 * Used to add extra details about a stub case.
4871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4872 */
4873void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4874{
4875#if defined(LOG_ENABLED) && defined(IN_RING3)
4876 PVM pVM = pVCpu->CTX_SUFF(pVM);
4877 char szRegs[4096];
4878 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4879 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4880 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4881 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4882 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4883 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4884 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4885 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4886 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4887 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4888 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4889 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4890 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4891 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4892 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4893 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4894 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4895 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4896 " efer=%016VR{efer}\n"
4897 " pat=%016VR{pat}\n"
4898 " sf_mask=%016VR{sf_mask}\n"
4899 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4900 " lstar=%016VR{lstar}\n"
4901 " star=%016VR{star} cstar=%016VR{cstar}\n"
4902 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4903 );
4904
4905 char szInstr[256];
4906 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4907 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4908 szInstr, sizeof(szInstr), NULL);
4909
4910 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4911#else
4912 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4913#endif
4914}
4915
4916/** @} */
4917
4918
4919
4920/** @name Register Access.
4921 * @{
4922 */
4923
4924/**
4925 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4926 *
4927 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4928 * segment limit.
4929 *
4930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4931 * @param cbInstr Instruction size.
4932 * @param offNextInstr The offset of the next instruction.
4933 * @param enmEffOpSize Effective operand size.
4934 */
4935VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4936 IEMMODE enmEffOpSize) RT_NOEXCEPT
4937{
4938 switch (enmEffOpSize)
4939 {
4940 case IEMMODE_16BIT:
4941 {
4942 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4943 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4944 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4945 pVCpu->cpum.GstCtx.rip = uNewIp;
4946 else
4947 return iemRaiseGeneralProtectionFault0(pVCpu);
4948 break;
4949 }
4950
4951 case IEMMODE_32BIT:
4952 {
4953 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4954 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4955
4956 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4957 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4958 pVCpu->cpum.GstCtx.rip = uNewEip;
4959 else
4960 return iemRaiseGeneralProtectionFault0(pVCpu);
4961 break;
4962 }
4963
4964 case IEMMODE_64BIT:
4965 {
4966 Assert(IEM_IS_64BIT_CODE(pVCpu));
4967
4968 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4969 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4970 pVCpu->cpum.GstCtx.rip = uNewRip;
4971 else
4972 return iemRaiseGeneralProtectionFault0(pVCpu);
4973 break;
4974 }
4975
4976 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4977 }
4978
4979#ifndef IEM_WITH_CODE_TLB
4980 /* Flush the prefetch buffer. */
4981 pVCpu->iem.s.cbOpcode = cbInstr;
4982#endif
4983
4984 /*
4985 * Clear RF and finish the instruction (maybe raise #DB).
4986 */
4987 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4988}
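
/* Worked example for the 16-bit case above (illustrative numbers): with IP=0xfff0,
   cbInstr=2 and offNextInstr=0x20, uNewIp = 0xfff0 + 2 + 0x20 = 0x0012 because the
   sum wraps in the 16-bit variable; the wrapped value is then checked against the
   CS limit (except in 64-bit code, where there is no limit check). */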
4989
4990
4991/**
4992 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4993 *
4994 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4995 * segment limit.
4996 *
4997 * @returns Strict VBox status code.
4998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4999 * @param cbInstr Instruction size.
5000 * @param offNextInstr The offset of the next instruction.
5001 */
5002VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5003{
5004 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5005
5006 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5007 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5008 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5009 pVCpu->cpum.GstCtx.rip = uNewIp;
5010 else
5011 return iemRaiseGeneralProtectionFault0(pVCpu);
5012
5013#ifndef IEM_WITH_CODE_TLB
5014 /* Flush the prefetch buffer. */
5015 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5016#endif
5017
5018 /*
5019 * Clear RF and finish the instruction (maybe raise #DB).
5020 */
5021 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5022}
5023
5024
5025/**
5026 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5027 *
5028 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5029 * segment limit.
5030 *
5031 * @returns Strict VBox status code.
5032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5033 * @param cbInstr Instruction size.
5034 * @param offNextInstr The offset of the next instruction.
5035 * @param enmEffOpSize Effective operand size.
5036 */
5037VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5038 IEMMODE enmEffOpSize) RT_NOEXCEPT
5039{
5040 if (enmEffOpSize == IEMMODE_32BIT)
5041 {
5042 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5043
5044 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5045 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5046 pVCpu->cpum.GstCtx.rip = uNewEip;
5047 else
5048 return iemRaiseGeneralProtectionFault0(pVCpu);
5049 }
5050 else
5051 {
5052 Assert(enmEffOpSize == IEMMODE_64BIT);
5053
5054 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5055 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5056 pVCpu->cpum.GstCtx.rip = uNewRip;
5057 else
5058 return iemRaiseGeneralProtectionFault0(pVCpu);
5059 }
5060
5061#ifndef IEM_WITH_CODE_TLB
5062 /* Flush the prefetch buffer. */
5063 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5064#endif
5065
5066 /*
5067 * Clear RF and finish the instruction (maybe raise #DB).
5068 */
5069 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5070}
5071
5072/** @} */
5073
5074
5075/** @name FPU access and helpers.
5076 *
5077 * @{
5078 */
5079
5080/**
5081 * Updates the x87.DS and FPUDP registers.
5082 *
5083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5084 * @param pFpuCtx The FPU context.
5085 * @param iEffSeg The effective segment register.
5086 * @param GCPtrEff The effective address relative to @a iEffSeg.
5087 */
5088DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5089{
5090 RTSEL sel;
5091 switch (iEffSeg)
5092 {
5093 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5094 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5095 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5096 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5097 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5098 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5099 default:
5100 AssertMsgFailed(("%d\n", iEffSeg));
5101 sel = pVCpu->cpum.GstCtx.ds.Sel;
5102 }
5103 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5104 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5105 {
5106 pFpuCtx->DS = 0;
5107 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5108 }
5109 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5110 {
5111 pFpuCtx->DS = sel;
5112 pFpuCtx->FPUDP = GCPtrEff;
5113 }
5114 else
5115 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5116}
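
/* Illustrative example with made-up values: in real or V86 mode with DS=0x1234 and
   GCPtrEff=0x0010, the code above stores FPUDP = 0x0010 + (0x1234 << 4) = 0x12350
   (the linear address) and zeroes the DS field, whereas protected mode keeps the
   selector and the offset separately. */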
5117
5118
5119/**
5120 * Rotates the stack registers in the push direction.
5121 *
5122 * @param pFpuCtx The FPU context.
5123 * @remarks This is a complete waste of time, but fxsave stores the registers in
5124 * stack order.
5125 */
5126DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5127{
5128 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5129 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5130 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5131 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5132 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5133 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5134 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5135 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5136 pFpuCtx->aRegs[0].r80 = r80Tmp;
5137}
5138
5139
5140/**
5141 * Rotates the stack registers in the pop direction.
5142 *
5143 * @param pFpuCtx The FPU context.
5144 * @remarks This is a complete waste of time, but fxsave stores the registers in
5145 * stack order.
5146 */
5147DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5148{
5149 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5150 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5151 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5152 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5153 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5154 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5155 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5156 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5157 pFpuCtx->aRegs[7].r80 = r80Tmp;
5158}
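
/* Reading aid for the two rotation helpers above: IEM keeps aRegs[] in ST order,
   i.e. aRegs[0] always holds ST(0). A push therefore moves every entry one slot up
   (the old ST(0) becomes ST(1) in aRegs[1]) and a pop moves them back down, while
   real hardware only changes TOP and leaves the physical registers alone. */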
5159
5160
5161/**
5162 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5163 * exception prevents it.
5164 *
5165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5166 * @param pResult The FPU operation result to push.
5167 * @param pFpuCtx The FPU context.
5168 */
5169static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5170{
5171 /* Update FSW and bail if there are pending exceptions afterwards. */
5172 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5173 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5174 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5175 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5176 {
5177 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5178 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5179 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5180 pFpuCtx->FSW = fFsw;
5181 return;
5182 }
5183
5184 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5185 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5186 {
5187 /* All is fine, push the actual value. */
5188 pFpuCtx->FTW |= RT_BIT(iNewTop);
5189 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5190 }
5191 else if (pFpuCtx->FCW & X86_FCW_IM)
5192 {
5193 /* Masked stack overflow, push QNaN. */
5194 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5195 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5196 }
5197 else
5198 {
5199 /* Raise stack overflow, don't push anything. */
5200 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5201 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5202 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5203 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5204 return;
5205 }
5206
5207 fFsw &= ~X86_FSW_TOP_MASK;
5208 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5209 pFpuCtx->FSW = fFsw;
5210
5211 iemFpuRotateStackPush(pFpuCtx);
5212 RT_NOREF(pVCpu);
5213}
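
/* Worked example of the TOP arithmetic above (illustrative): with TOP=0 the new top
   is iNewTop = (0 + 7) & 7 = 7, i.e. a push decrements TOP modulo 8. If FTW bit 7 is
   already set that slot is occupied, so the masked (QNaN) or unmasked (no push)
   stack overflow path is taken instead of storing the result. */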
5214
5215
5216/**
5217 * Stores a result in a FPU register and updates the FSW and FTW.
5218 *
5219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5220 * @param pFpuCtx The FPU context.
5221 * @param pResult The result to store.
5222 * @param iStReg Which FPU register to store it in.
5223 */
5224static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5225{
5226 Assert(iStReg < 8);
5227 uint16_t fNewFsw = pFpuCtx->FSW;
5228 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5229 fNewFsw &= ~X86_FSW_C_MASK;
5230 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5231 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5232 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5233 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5234 pFpuCtx->FSW = fNewFsw;
5235 pFpuCtx->FTW |= RT_BIT(iReg);
5236 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5237 RT_NOREF(pVCpu);
5238}
5239
5240
5241/**
5242 * Only updates the FPU status word (FSW) with the result of the current
5243 * instruction.
5244 *
5245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5246 * @param pFpuCtx The FPU context.
5247 * @param u16FSW The FSW output of the current instruction.
5248 */
5249static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5250{
5251 uint16_t fNewFsw = pFpuCtx->FSW;
5252 fNewFsw &= ~X86_FSW_C_MASK;
5253 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5254 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5255 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5256 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5257 pFpuCtx->FSW = fNewFsw;
5258 RT_NOREF(pVCpu);
5259}
5260
5261
5262/**
5263 * Pops one item off the FPU stack if no pending exception prevents it.
5264 *
5265 * @param pFpuCtx The FPU context.
5266 */
5267static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5268{
5269 /* Check pending exceptions. */
5270 uint16_t uFSW = pFpuCtx->FSW;
5271 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5272 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5273 return;
5274
5275 /* TOP++ (a pop increments TOP; adding 9 equals adding 1 modulo 8). */
5276 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5277 uFSW &= ~X86_FSW_TOP_MASK;
5278 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5279 pFpuCtx->FSW = uFSW;
5280
5281 /* Mark the previous ST0 as empty. */
5282 iOldTop >>= X86_FSW_TOP_SHIFT;
5283 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5284
5285 /* Rotate the registers. */
5286 iemFpuRotateStackPop(pFpuCtx);
5287}
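
/* Minimal sketch (not compiled) of the TOP update performed above, assuming only the
   standard X86_FSW_TOP_* field layout; the helper name is made up and
   iemFpuMaybePopOne remains the authoritative code. Adding 9 is the same as adding 1
   once the result is masked back into the 3-bit TOP field. */
#if 0
DECLINLINE(uint16_t) iemFpuSketchFswAfterPop(uint16_t fFsw)
{
    uint16_t iTop = X86_FSW_TOP_GET(fFsw);          /* extract TOP (bits 11..13) */
    iTop = (iTop + 1) & X86_FSW_TOP_SMASK;          /* a pop increments TOP modulo 8 */
    return (uint16_t)((fFsw & ~X86_FSW_TOP_MASK) | (iTop << X86_FSW_TOP_SHIFT));
}
#endif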
5288
5289
5290/**
5291 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5292 *
5293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5294 * @param pResult The FPU operation result to push.
5295 * @param uFpuOpcode The FPU opcode value.
5296 */
5297void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5298{
5299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5300 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5301 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5302}
5303
5304
5305/**
5306 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5307 * and sets FPUDP and FPUDS.
5308 *
5309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5310 * @param pResult The FPU operation result to push.
5311 * @param iEffSeg The effective segment register.
5312 * @param GCPtrEff The effective address relative to @a iEffSeg.
5313 * @param uFpuOpcode The FPU opcode value.
5314 */
5315void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5316 uint16_t uFpuOpcode) RT_NOEXCEPT
5317{
5318 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5319 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5320 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5321 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5322}
5323
5324
5325/**
5326 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5327 * unless a pending exception prevents it.
5328 *
5329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5330 * @param pResult The FPU operation result to store and push.
5331 * @param uFpuOpcode The FPU opcode value.
5332 */
5333void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5334{
5335 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5336 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5337
5338 /* Update FSW and bail if there are pending exceptions afterwards. */
5339 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5340 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5341 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5342 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5343 {
5344 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5345 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5346 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5347 pFpuCtx->FSW = fFsw;
5348 return;
5349 }
5350
5351 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5352 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5353 {
5354 /* All is fine, push the actual value. */
5355 pFpuCtx->FTW |= RT_BIT(iNewTop);
5356 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5357 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5358 }
5359 else if (pFpuCtx->FCW & X86_FCW_IM)
5360 {
5361 /* Masked stack overflow, push QNaN. */
5362 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5363 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5364 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5365 }
5366 else
5367 {
5368 /* Raise stack overflow, don't push anything. */
5369 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5370 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5371 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5372 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5373 return;
5374 }
5375
5376 fFsw &= ~X86_FSW_TOP_MASK;
5377 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5378 pFpuCtx->FSW = fFsw;
5379
5380 iemFpuRotateStackPush(pFpuCtx);
5381}
5382
5383
5384/**
5385 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5386 * FOP.
5387 *
5388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5389 * @param pResult The result to store.
5390 * @param iStReg Which FPU register to store it in.
5391 * @param uFpuOpcode The FPU opcode value.
5392 */
5393void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5394{
5395 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5396 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5397 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5398}
5399
5400
5401/**
5402 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5403 * FOP, and then pops the stack.
5404 *
5405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5406 * @param pResult The result to store.
5407 * @param iStReg Which FPU register to store it in.
5408 * @param uFpuOpcode The FPU opcode value.
5409 */
5410void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5411{
5412 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5413 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5414 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5415 iemFpuMaybePopOne(pFpuCtx);
5416}
5417
5418
5419/**
5420 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5421 * FPUDP, and FPUDS.
5422 *
5423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5424 * @param pResult The result to store.
5425 * @param iStReg Which FPU register to store it in.
5426 * @param iEffSeg The effective memory operand selector register.
5427 * @param GCPtrEff The effective memory operand offset.
5428 * @param uFpuOpcode The FPU opcode value.
5429 */
5430void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5431 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5432{
5433 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5434 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5435 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5436 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5437}
5438
5439
5440/**
5441 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5442 * FPUDP, and FPUDS, and then pops the stack.
5443 *
5444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5445 * @param pResult The result to store.
5446 * @param iStReg Which FPU register to store it in.
5447 * @param iEffSeg The effective memory operand selector register.
5448 * @param GCPtrEff The effective memory operand offset.
5449 * @param uFpuOpcode The FPU opcode value.
5450 */
5451void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5452 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5456 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5457 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5458 iemFpuMaybePopOne(pFpuCtx);
5459}
5460
5461
5462/**
5463 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5464 *
5465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5466 * @param uFpuOpcode The FPU opcode value.
5467 */
5468void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5469{
5470 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5471 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5472}
5473
5474
5475/**
5476 * Updates the FSW, FOP, FPUIP, and FPUCS.
5477 *
5478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5479 * @param u16FSW The FSW from the current instruction.
5480 * @param uFpuOpcode The FPU opcode value.
5481 */
5482void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5483{
5484 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5485 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5486 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5487}
5488
5489
5490/**
5491 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5492 *
5493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5494 * @param u16FSW The FSW from the current instruction.
5495 * @param uFpuOpcode The FPU opcode value.
5496 */
5497void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5498{
5499 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5500 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5501 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5502 iemFpuMaybePopOne(pFpuCtx);
5503}
5504
5505
5506/**
5507 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5508 *
5509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5510 * @param u16FSW The FSW from the current instruction.
5511 * @param iEffSeg The effective memory operand selector register.
5512 * @param GCPtrEff The effective memory operand offset.
5513 * @param uFpuOpcode The FPU opcode value.
5514 */
5515void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5516{
5517 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5518 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5519 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5520 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5521}
5522
5523
5524/**
5525 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5526 *
5527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5528 * @param u16FSW The FSW from the current instruction.
5529 * @param uFpuOpcode The FPU opcode value.
5530 */
5531void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5532{
5533 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5534 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5535 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5536 iemFpuMaybePopOne(pFpuCtx);
5537 iemFpuMaybePopOne(pFpuCtx);
5538}
5539
5540
5541/**
5542 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5543 *
5544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5545 * @param u16FSW The FSW from the current instruction.
5546 * @param iEffSeg The effective memory operand selector register.
5547 * @param GCPtrEff The effective memory operand offset.
5548 * @param uFpuOpcode The FPU opcode value.
5549 */
5550void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5551 uint16_t uFpuOpcode) RT_NOEXCEPT
5552{
5553 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5554 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5555 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5556 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5557 iemFpuMaybePopOne(pFpuCtx);
5558}
5559
5560
5561/**
5562 * Worker routine for raising an FPU stack underflow exception.
5563 *
5564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5565 * @param pFpuCtx The FPU context.
5566 * @param iStReg The stack register being accessed.
5567 */
5568static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5569{
5570 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5571 if (pFpuCtx->FCW & X86_FCW_IM)
5572 {
5573 /* Masked underflow. */
5574 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5575 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5576 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5577 if (iStReg != UINT8_MAX)
5578 {
5579 pFpuCtx->FTW |= RT_BIT(iReg);
5580 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5581 }
5582 }
5583 else
5584 {
5585 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5586 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5587 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5588 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5589 }
5590 RT_NOREF(pVCpu);
5591}
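
/* Behaviour summary for the worker above, paraphrasing the usual x87 rules: with
   FCW.IM set the underflow is masked - IE and SF are set and the target register
   (if any) is loaded with a QNaN; with FCW.IM clear only IE, SF, ES and B are set
   here and the fault is delivered later via #MF or FERR when the guest executes the
   next waiting FPU instruction. */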
5592
5593
5594/**
5595 * Raises a FPU stack underflow exception.
5596 *
5597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5598 * @param iStReg The destination register that should be loaded
5599 * with QNaN if \#IS is not masked. Specify
5600 * UINT8_MAX if none (like for fcom).
5601 * @param uFpuOpcode The FPU opcode value.
5602 */
5603void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5604{
5605 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5606 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5607 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5608}
5609
5610
5611void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5612{
5613 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5614 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5615 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5616 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5617}
5618
5619
5620void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5621{
5622 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5623 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5624 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5625 iemFpuMaybePopOne(pFpuCtx);
5626}
5627
5628
5629void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5630 uint16_t uFpuOpcode) RT_NOEXCEPT
5631{
5632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5633 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5634 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5635 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5636 iemFpuMaybePopOne(pFpuCtx);
5637}
5638
5639
5640void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5641{
5642 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5643 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5644 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5645 iemFpuMaybePopOne(pFpuCtx);
5646 iemFpuMaybePopOne(pFpuCtx);
5647}
5648
5649
5650void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5651{
5652 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5653 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5654
5655 if (pFpuCtx->FCW & X86_FCW_IM)
5656 {
5657 /* Masked underflow - Push QNaN. */
5658 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5659 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5660 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5661 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5662 pFpuCtx->FTW |= RT_BIT(iNewTop);
5663 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5664 iemFpuRotateStackPush(pFpuCtx);
5665 }
5666 else
5667 {
5668 /* Exception pending - don't change TOP or the register stack. */
5669 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5670 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5671 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5672 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5673 }
5674}
5675
5676
5677void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5678{
5679 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5680 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5681
5682 if (pFpuCtx->FCW & X86_FCW_IM)
5683 {
5684 /* Masked underflow - Push QNaN. */
5685 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5686 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5687 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5688 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5689 pFpuCtx->FTW |= RT_BIT(iNewTop);
5690 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5691 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5692 iemFpuRotateStackPush(pFpuCtx);
5693 }
5694 else
5695 {
5696 /* Exception pending - don't change TOP or the register stack. */
5697 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5699 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5700 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5701 }
5702}
5703
5704
5705/**
5706 * Worker routine for raising an FPU stack overflow exception on a push.
5707 *
5708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5709 * @param pFpuCtx The FPU context.
5710 */
5711static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5712{
5713 if (pFpuCtx->FCW & X86_FCW_IM)
5714 {
5715 /* Masked overflow. */
5716 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5717 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5718 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5719 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5720 pFpuCtx->FTW |= RT_BIT(iNewTop);
5721 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5722 iemFpuRotateStackPush(pFpuCtx);
5723 }
5724 else
5725 {
5726 /* Exception pending - don't change TOP or the register stack. */
5727 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5728 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5729 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5730 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5731 }
5732 RT_NOREF(pVCpu);
5733}
5734
5735
5736/**
5737 * Raises a FPU stack overflow exception on a push.
5738 *
5739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5740 * @param uFpuOpcode The FPU opcode value.
5741 */
5742void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5743{
5744 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5745 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5746 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5747}
5748
5749
5750/**
5751 * Raises a FPU stack overflow exception on a push with a memory operand.
5752 *
5753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5754 * @param iEffSeg The effective memory operand selector register.
5755 * @param GCPtrEff The effective memory operand offset.
5756 * @param uFpuOpcode The FPU opcode value.
5757 */
5758void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5759{
5760 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5761 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5762 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5763 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5764}
5765
5766/** @} */
5767
5768
5769/** @name Memory access.
5770 *
5771 * @{
5772 */
5773
5774#undef LOG_GROUP
5775#define LOG_GROUP LOG_GROUP_IEM_MEM
5776
5777/**
5778 * Updates the IEMCPU::cbWritten counter if applicable.
5779 *
5780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5781 * @param fAccess The access being accounted for.
5782 * @param cbMem The access size.
5783 */
5784DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5785{
5786 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5787 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5788 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5789}
5790
5791
5792/**
5793 * Applies the segment limit, base and attributes.
5794 *
5795 * This may raise a \#GP or \#SS.
5796 *
5797 * @returns VBox strict status code.
5798 *
5799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5800 * @param fAccess The kind of access which is being performed.
5801 * @param iSegReg The index of the segment register to apply.
5802 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5803 * TSS, ++).
5804 * @param cbMem The access size.
5805 * @param pGCPtrMem Pointer to the guest memory address to apply
5806 * segmentation to. Input and output parameter.
5807 */
5808VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5809{
5810 if (iSegReg == UINT8_MAX)
5811 return VINF_SUCCESS;
5812
5813 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5814 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5815 switch (IEM_GET_CPU_MODE(pVCpu))
5816 {
5817 case IEMMODE_16BIT:
5818 case IEMMODE_32BIT:
5819 {
5820 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5821 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5822
5823 if ( pSel->Attr.n.u1Present
5824 && !pSel->Attr.n.u1Unusable)
5825 {
5826 Assert(pSel->Attr.n.u1DescType);
5827 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5828 {
5829 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5830 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5831 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5832
5833 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5834 {
5835 /** @todo CPL check. */
5836 }
5837
5838 /*
5839 * There are two kinds of data selectors, normal and expand down.
5840 */
5841 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5842 {
5843 if ( GCPtrFirst32 > pSel->u32Limit
5844 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5845 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5846 }
5847 else
5848 {
5849 /*
5850 * The upper boundary is defined by the B bit, not the G bit!
5851 */
5852 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5853 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5854 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5855 }
5856 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5857 }
5858 else
5859 {
5860 /*
5861 * A code selector can usually be used to read thru; writing is
5862 * only permitted in real and V8086 mode.
5863 */
5864 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5865 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5866 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5867 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5868 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5869
5870 if ( GCPtrFirst32 > pSel->u32Limit
5871 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5872 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5873
5874 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5875 {
5876 /** @todo CPL check. */
5877 }
5878
5879 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5880 }
5881 }
5882 else
5883 return iemRaiseGeneralProtectionFault0(pVCpu);
5884 return VINF_SUCCESS;
5885 }
5886
5887 case IEMMODE_64BIT:
5888 {
5889 RTGCPTR GCPtrMem = *pGCPtrMem;
5890 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5891 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5892
5893 Assert(cbMem >= 1);
5894 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5895 return VINF_SUCCESS;
5896 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5897 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5898 return iemRaiseGeneralProtectionFault0(pVCpu);
5899 }
5900
5901 default:
5902 AssertFailedReturn(VERR_IEM_IPE_7);
5903 }
5904}
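
/* Illustrative limit-check examples for the 16/32-bit path above (made-up values):
     - normal data segment with limit 0x1fff: offsets 0x0000..0x1fff are valid; an
       access whose last byte lands at 0x2000 fails the GCPtrLast32 check and goes
       through iemRaiseSelectorBounds;
     - expand-down data segment with limit 0x0fff and B=0: valid offsets are
       0x1000..0xffff, so an access starting at 0x0800 faults instead. */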
5905
5906
5907/**
5908 * Translates a virtual address to a physical address and checks if we
5909 * can access the page as specified.
5910 *
5911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5912 * @param GCPtrMem The virtual address.
5913 * @param cbAccess The access size, for raising \#PF correctly for
5914 * FXSAVE and such.
5915 * @param fAccess The intended access.
5916 * @param pGCPhysMem Where to return the physical address.
5917 */
5918VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5919 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5920{
5921 /** @todo Need a different PGM interface here. We're currently using
5922 * generic / REM interfaces. This won't cut it for R0. */
5923 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5924 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5925 * here. */
5926 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5927 PGMPTWALKFAST WalkFast;
5928 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5929 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5930 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5931 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5932 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5933 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
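    /* Note: the XOR sets PGMQPAGE_F_CR0_WP0 (== X86_CR0_WP, see the AssertCompile above)
       exactly when CR0.WP is clear, matching the "CR0.WP is zero" meaning of the flag. */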
5934 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5935 fQPage |= PGMQPAGE_F_USER_MODE;
5936 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5937 if (RT_SUCCESS(rc))
5938 {
5939 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5940
5941 /* If the page is writable and does not have the no-exec bit set, all
5942 access is allowed. Otherwise we'll have to check more carefully... */
5943 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5944 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5945 || (WalkFast.fEffective & X86_PTE_RW)
5946 || ( ( IEM_GET_CPL(pVCpu) != 3
5947 || (fAccess & IEM_ACCESS_WHAT_SYS))
5948 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
5949 && ( (WalkFast.fEffective & X86_PTE_US)
5950 || IEM_GET_CPL(pVCpu) != 3
5951 || (fAccess & IEM_ACCESS_WHAT_SYS) )
5952 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
5953 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
5954 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5955 )
5956 );
5957
5958 /* PGMGstQueryPageFast sets the A & D bits. */
5959 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5960 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
5961
5962 *pGCPhysMem = WalkFast.GCPhys;
5963 return VINF_SUCCESS;
5964 }
5965
5966 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5967 /** @todo Check unassigned memory in unpaged mode. */
5968#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5969 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
5970 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5971#endif
5972 *pGCPhysMem = NIL_RTGCPHYS;
5973 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5974}
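#if 0 /* Illustrative sketch, not built: one way a caller could use the helper above.
 * iemExampleReadBytes is a hypothetical name; it assumes the access stays within a
 * single guest page and that access handlers may be bypassed, so it simply pairs
 * the translation with PGMPhysSimpleReadGCPhys. */
static VBOXSTRICTRC iemExampleReadBytes(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, void *pvDst, uint32_t cbToRead)
{
    RTGCPHYS     GCPhys;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, cbToRead, IEM_ACCESS_DATA_R, &GCPhys);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* e.g. the #PF status produced by iemRaisePageFault. */
    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pvDst, GCPhys, cbToRead);
    return RT_SUCCESS(rc) ? VINF_SUCCESS : rc;
}
#endif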
5975
5976#if 0 /*unused*/
5977/**
5978 * Looks up a memory mapping entry.
5979 *
5980 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5982 * @param pvMem The memory address.
5983 * @param fAccess The access to.
5984 */
5985DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5986{
5987 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5988 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5989 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5990 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5991 return 0;
5992 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5993 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5994 return 1;
5995 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5996 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5997 return 2;
5998 return VERR_NOT_FOUND;
5999}
6000#endif
6001
6002/**
6003 * Finds a free memmap entry when using iNextMapping doesn't work.
6004 *
6005 * @returns Memory mapping index, 1024 on failure.
6006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6007 */
6008static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6009{
6010 /*
6011 * The easy case.
6012 */
6013 if (pVCpu->iem.s.cActiveMappings == 0)
6014 {
6015 pVCpu->iem.s.iNextMapping = 1;
6016 return 0;
6017 }
6018
6019 /* There should be enough mappings for all instructions. */
6020 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6021
6022 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6023 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6024 return i;
6025
6026 AssertFailedReturn(1024);
6027}
6028
6029
6030/**
6031 * Commits a bounce buffer that needs writing back and unmaps it.
6032 *
6033 * @returns Strict VBox status code.
6034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6035 * @param iMemMap The index of the buffer to commit.
6036 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6037 * Always false in ring-3, obviously.
6038 */
6039static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6040{
6041 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6042 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6043#ifdef IN_RING3
6044 Assert(!fPostponeFail);
6045 RT_NOREF_PV(fPostponeFail);
6046#endif
6047
6048 /*
6049 * Do the writing.
6050 */
6051 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6052 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6053 {
6054 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6055 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6056 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6057 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6058 {
6059 /*
6060 * Carefully and efficiently dealing with access handler return
6061 * codes makes this a little bloated.
6062 */
6063 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6064 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6065 pbBuf,
6066 cbFirst,
6067 PGMACCESSORIGIN_IEM);
6068 if (rcStrict == VINF_SUCCESS)
6069 {
6070 if (cbSecond)
6071 {
6072 rcStrict = PGMPhysWrite(pVM,
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6074 pbBuf + cbFirst,
6075 cbSecond,
6076 PGMACCESSORIGIN_IEM);
6077 if (rcStrict == VINF_SUCCESS)
6078 { /* nothing */ }
6079 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6080 {
6081 LogEx(LOG_GROUP_IEM,
6082 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6083 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6084 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6085 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6086 }
6087#ifndef IN_RING3
6088 else if (fPostponeFail)
6089 {
6090 LogEx(LOG_GROUP_IEM,
6091 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6092 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6093 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6094 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6095 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6096 return iemSetPassUpStatus(pVCpu, rcStrict);
6097 }
6098#endif
6099 else
6100 {
6101 LogEx(LOG_GROUP_IEM,
6102 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6103 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6104 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6105 return rcStrict;
6106 }
6107 }
6108 }
6109 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6110 {
6111 if (!cbSecond)
6112 {
6113 LogEx(LOG_GROUP_IEM,
6114 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6115 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6116 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6117 }
6118 else
6119 {
6120 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6121 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6122 pbBuf + cbFirst,
6123 cbSecond,
6124 PGMACCESSORIGIN_IEM);
6125 if (rcStrict2 == VINF_SUCCESS)
6126 {
6127 LogEx(LOG_GROUP_IEM,
6128 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6129 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6130 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6131 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6132 }
6133 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6134 {
6135 LogEx(LOG_GROUP_IEM,
6136 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6137 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6138 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6139 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6140 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6141 }
6142#ifndef IN_RING3
6143 else if (fPostponeFail)
6144 {
6145 LogEx(LOG_GROUP_IEM,
6146 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6147 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6149 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6150 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6151 return iemSetPassUpStatus(pVCpu, rcStrict);
6152 }
6153#endif
6154 else
6155 {
6156 LogEx(LOG_GROUP_IEM,
6157 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6160 return rcStrict2;
6161 }
6162 }
6163 }
6164#ifndef IN_RING3
6165 else if (fPostponeFail)
6166 {
6167 LogEx(LOG_GROUP_IEM,
6168 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6169 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6171 if (!cbSecond)
6172 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6173 else
6174 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6175 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6176 return iemSetPassUpStatus(pVCpu, rcStrict);
6177 }
6178#endif
6179 else
6180 {
6181 LogEx(LOG_GROUP_IEM,
6182 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6183 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6184 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6185 return rcStrict;
6186 }
6187 }
6188 else
6189 {
6190 /*
6191 * No access handlers, much simpler.
6192 */
6193 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6194 if (RT_SUCCESS(rc))
6195 {
6196 if (cbSecond)
6197 {
6198 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6199 if (RT_SUCCESS(rc))
6200 { /* likely */ }
6201 else
6202 {
6203 LogEx(LOG_GROUP_IEM,
6204 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6205 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6206 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6207 return rc;
6208 }
6209 }
6210 }
6211 else
6212 {
6213 LogEx(LOG_GROUP_IEM,
6214 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6216 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6217 return rc;
6218 }
6219 }
6220 }
6221
6222#if defined(IEM_LOG_MEMORY_WRITES)
6223 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6224 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6225 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6226 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6227 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6228 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6229
6230 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6231 g_cbIemWrote = cbWrote;
6232 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6233#endif
6234
6235 /*
6236 * Free the mapping entry.
6237 */
6238 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6239 Assert(pVCpu->iem.s.cActiveMappings != 0);
6240 pVCpu->iem.s.cActiveMappings--;
6241 return VINF_SUCCESS;
6242}
6243
6244
6245/**
6246 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6247 */
6248DECL_FORCE_INLINE(uint32_t)
6249iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6250{
6251 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6252 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6253 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6254 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6255}
6256
6257
6258/**
6259 * iemMemMap worker that deals with a request crossing pages.
6260 */
6261static VBOXSTRICTRC
6262iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6263 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6264{
6265 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6266 Assert(cbMem <= GUEST_PAGE_SIZE);
6267
6268 /*
6269 * Do the address translations.
6270 */
6271 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6272 RTGCPHYS GCPhysFirst;
6273 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6274 if (rcStrict != VINF_SUCCESS)
6275 return rcStrict;
6276 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6277
6278 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6279 RTGCPHYS GCPhysSecond;
6280 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6281 cbSecondPage, fAccess, &GCPhysSecond);
6282 if (rcStrict != VINF_SUCCESS)
6283 return rcStrict;
6284 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6285 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6286
6287 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6288
6289 /*
6290 * Check for data breakpoints.
6291 */
6292 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6293 { /* likely */ }
6294 else
6295 {
6296 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6297 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6298 cbSecondPage, fAccess);
6299 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6300 if (fDataBps > 1)
6301 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6302 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6303 }
6304
6305 /*
6306 * Read in the current memory content if it's a read, execute or partial
6307 * write access.
6308 */
6309 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6310
6311 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6312 {
6313 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6314 {
6315 /*
6316 * Must carefully deal with access handler status codes here,
6317 * makes the code a bit bloated.
6318 */
6319 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6320 if (rcStrict == VINF_SUCCESS)
6321 {
6322 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6323 if (rcStrict == VINF_SUCCESS)
6324 { /*likely */ }
6325 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6326 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6327 else
6328 {
6329 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6330 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6331 return rcStrict;
6332 }
6333 }
6334 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6335 {
6336 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6337 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6338 {
6339 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6340 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6341 }
6342 else
6343 {
6344 LogEx(LOG_GROUP_IEM,
6345 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6346 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6347 return rcStrict2;
6348 }
6349 }
6350 else
6351 {
6352 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6353 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6354 return rcStrict;
6355 }
6356 }
6357 else
6358 {
6359 /*
6360 * No informational status codes here, much more straightforward.
6361 */
6362 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6363 if (RT_SUCCESS(rc))
6364 {
6365 Assert(rc == VINF_SUCCESS);
6366 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6367 if (RT_SUCCESS(rc))
6368 Assert(rc == VINF_SUCCESS);
6369 else
6370 {
6371 LogEx(LOG_GROUP_IEM,
6372 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6373 return rc;
6374 }
6375 }
6376 else
6377 {
6378 LogEx(LOG_GROUP_IEM,
6379 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6380 return rc;
6381 }
6382 }
6383 }
6384#ifdef VBOX_STRICT
6385 else
6386 memset(pbBuf, 0xcc, cbMem);
6387 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6388 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6389#endif
6390 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6391
6392 /*
6393 * Commit the bounce buffer entry.
6394 */
6395 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6396 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6397 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6398 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6399 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6400 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6401 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6402 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6403 pVCpu->iem.s.cActiveMappings++;
6404
6405 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6406 *ppvMem = pbBuf;
6407 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6408 return VINF_SUCCESS;
6409}
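#if 0 /* Illustrative sketch, not built: the page-split arithmetic used above.
 * iemExampleSplitAcrossPages is a hypothetical helper and is only meaningful when
 * the access really does cross a page boundary. */
static void iemExampleSplitAcrossPages(RTGCPTR GCPtrFirst, size_t cbMem, uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    Assert(cbMem <= GUEST_PAGE_SIZE); /* at most one boundary can be crossed */
    *pcbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    *pcbSecondPage = (uint32_t)cbMem - *pcbFirstPage;
    /* e.g. GCPtrFirst=0xfffe, cbMem=8 with 4 KiB pages: 2 bytes on the first page, 6 on the second. */
}
#endif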
6410
6411
6412/**
6413 * iemMemMap worker that deals with iemMemPageMap failures.
6414 */
6415static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6416 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6417{
6418 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6419
6420 /*
6421 * Filter out conditions we can't handle and the ones which shouldn't happen.
6422 */
6423 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6424 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6425 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6426 {
6427 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6428 return rcMap;
6429 }
6430 pVCpu->iem.s.cPotentialExits++;
6431
6432 /*
6433 * Read in the current memory content if it's a read, execute or partial
6434 * write access.
6435 */
6436 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6437 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6438 {
6439 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6440 memset(pbBuf, 0xff, cbMem);
6441 else
6442 {
6443 int rc;
6444 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6445 {
6446 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6447 if (rcStrict == VINF_SUCCESS)
6448 { /* nothing */ }
6449 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6450 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6451 else
6452 {
6453 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6454 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6455 return rcStrict;
6456 }
6457 }
6458 else
6459 {
6460 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6461 if (RT_SUCCESS(rc))
6462 { /* likely */ }
6463 else
6464 {
6465 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6466 GCPhysFirst, rc));
6467 return rc;
6468 }
6469 }
6470 }
6471 }
6472#ifdef VBOX_STRICT
6473 else
6474 memset(pbBuf, 0xcc, cbMem);
6477 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6478 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6479#endif
6480
6481 /*
6482 * Commit the bounce buffer entry.
6483 */
6484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6486 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6487 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6488 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6489 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6491 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6492 pVCpu->iem.s.cActiveMappings++;
6493
6494 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6495 *ppvMem = pbBuf;
6496 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6497 return VINF_SUCCESS;
6498}
6499
6500
6501
6502/**
6503 * Maps the specified guest memory for the given kind of access.
6504 *
6505 * This may be using bounce buffering of the memory if it's crossing a page
6506 * boundary or if there is an access handler installed for any of it. Because
6507 * of lock prefix guarantees, we're in for some extra clutter when this
6508 * happens.
6509 *
6510 * This may raise a \#GP, \#SS, \#PF or \#AC.
6511 *
6512 * @returns VBox strict status code.
6513 *
6514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6515 * @param ppvMem Where to return the pointer to the mapped memory.
6516 * @param pbUnmapInfo Where to return unmap info to be passed to
6517 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6518 * done.
6519 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6520 * 8, 12, 16, 32 or 512. When used by string operations
6521 * it can be up to a page.
6522 * @param iSegReg The index of the segment register to use for this
6523 * access. The base and limits are checked. Use UINT8_MAX
6524 * to indicate that no segmentation is required (for IDT,
6525 * GDT and LDT accesses).
6526 * @param GCPtrMem The address of the guest memory.
6527 * @param fAccess How the memory is being accessed. The
6528 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6529 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6530 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6531 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6532 * set.
6533 * @param uAlignCtl Alignment control:
6534 * - Bits 15:0 is the alignment mask.
6535 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6536 * IEM_MEMMAP_F_ALIGN_SSE, and
6537 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6538 * Pass zero to skip alignment.
6539 */
6540VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6541 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6542{
6543 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6544
6545 /*
6546 * Check the input and figure out which mapping entry to use.
6547 */
6548 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6549 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6550 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6551 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6552 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6553
6554 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6555 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6556 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6557 {
6558 iMemMap = iemMemMapFindFree(pVCpu);
6559 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6560 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6561 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6562 pVCpu->iem.s.aMemMappings[2].fAccess),
6563 VERR_IEM_IPE_9);
6564 }
6565
6566 /*
6567 * Map the memory, checking that we can actually access it. If something
6568 * slightly complicated happens, fall back on bounce buffering.
6569 */
6570 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6571 if (rcStrict == VINF_SUCCESS)
6572 { /* likely */ }
6573 else
6574 return rcStrict;
6575
6576 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6577 { /* likely */ }
6578 else
6579 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6580
6581 /*
6582 * Alignment check.
6583 */
6584 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6585 { /* likelyish */ }
6586 else
6587 {
6588 /* Misaligned access. */
6589 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6590 {
6591 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6592 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6593 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6594 {
6595 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6596
6597 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6598 { /* likely */ }
6599 else
6600 return iemRaiseAlignmentCheckException(pVCpu);
6601 }
6602 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6603 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6604 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6605 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6606 * that's what FXSAVE does on a 10980xe. */
6607 && iemMemAreAlignmentChecksEnabled(pVCpu))
6608 return iemRaiseAlignmentCheckException(pVCpu);
6609 else
6610 return iemRaiseGeneralProtectionFault0(pVCpu);
6611 }
6612
6613#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6614 /* If the access is atomic there are host platform alignment restrictions
6615 we need to conform with. */
6616 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6617# if defined(RT_ARCH_AMD64)
6618 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6619# elif defined(RT_ARCH_ARM64)
6620 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6621# else
6622# error port me
6623# endif
6624 )
6625 { /* okay */ }
6626 else
6627 {
6628 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6629 pVCpu->iem.s.cMisalignedAtomics += 1;
6630 return VINF_EM_EMULATE_SPLIT_LOCK;
6631 }
6632#endif
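    /* Example (assuming 64 byte cache lines and that no #AC/#GP was raised above):
       a LOCK'ed 8 byte access at an address ending in 0x3c has only 64 - 60 = 4
       bytes left in its cache line, so on AMD64 hosts it takes the
       VINF_EM_EMULATE_SPLIT_LOCK path above instead of being mapped directly. */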
6633 }
6634
6635#ifdef IEM_WITH_DATA_TLB
6636 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6637
6638 /*
6639 * Get the TLB entry for this page and check PT flags.
6640 *
6641 * We reload the TLB entry if we need to set the dirty bit (accessed
6642 * should in theory always be set).
6643 */
6644 uint8_t *pbMem = NULL;
6645 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6646 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6647 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6648 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6649 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6650 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6651 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6652 {
6653# ifdef IEM_WITH_TLB_STATISTICS
6654 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6655# endif
6656
6657 /* If the page is either supervisor only or non-writable, we need to do
6658 more careful access checks. */
6659 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6660 {
6661 /* Write to read only memory? */
6662 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6663 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6664 && ( ( IEM_GET_CPL(pVCpu) == 3
6665 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6666 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6667 {
6668 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6669 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6670 }
6671
6672 /* Kernel memory accessed by userland? */
6673 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6674 && IEM_GET_CPL(pVCpu) == 3
6675 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6676 {
6677 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6678 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6679 }
6680 }
6681
6682 /* Look up the physical page info if necessary. */
6683 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6684# ifdef IN_RING3
6685 pbMem = pTlbe->pbMappingR3;
6686# else
6687 pbMem = NULL;
6688# endif
6689 else
6690 {
6691 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6692 { /* likely */ }
6693 else
6694 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6695 pTlbe->pbMappingR3 = NULL;
6696 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6697 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6698 &pbMem, &pTlbe->fFlagsAndPhysRev);
6699 AssertRCReturn(rc, rc);
6700# ifdef IN_RING3
6701 pTlbe->pbMappingR3 = pbMem;
6702# endif
6703 }
6704 }
6705 else
6706 {
6707 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6708
6709 /* This page table walking will set A and D bits as required by the access while performing the walk.
6710 ASSUMES these are set when the address is translated rather than on commit... */
6711 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6712 PGMPTWALKFAST WalkFast;
6713 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6714 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6715 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6716 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6717 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6718 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6719 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6720 fQPage |= PGMQPAGE_F_USER_MODE;
6721 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6722 if (RT_SUCCESS(rc))
6723 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6724 else
6725 {
6726 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6727# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6728 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6729 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6730# endif
6731 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6732 }
6733
6734 uint32_t fDataBps;
6735 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6736 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6737 {
6738 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6739 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6740 {
6741 pTlbe--;
6742 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6743 }
6744 else
6745 {
6746 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6747 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6748 }
6749 }
6750 else
6751 {
6752 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6753 to the page with the data access breakpoint armed on it to pass thru here. */
6754 if (fDataBps > 1)
6755 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6756 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6757 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6758 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6759 pTlbe->uTag = uTagNoRev;
6760 }
6761 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6762 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6763 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6764 pTlbe->GCPhys = GCPhysPg;
6765 pTlbe->pbMappingR3 = NULL;
6766 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6767 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6768 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6769 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6770 || IEM_GET_CPL(pVCpu) != 3
6771 || (fAccess & IEM_ACCESS_WHAT_SYS));
6772
6773 /* Resolve the physical address. */
6774 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6775 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6776 &pbMem, &pTlbe->fFlagsAndPhysRev);
6777 AssertRCReturn(rc, rc);
6778# ifdef IN_RING3
6779 pTlbe->pbMappingR3 = pbMem;
6780# endif
6781 }
6782
6783 /*
6784 * Check the physical page level access and mapping.
6785 */
6786 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6787 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6788 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6789 { /* probably likely */ }
6790 else
6791 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6792 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6793 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6794 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6795 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6796 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6797
6798 if (pbMem)
6799 {
6800 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6801 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6802 fAccess |= IEM_ACCESS_NOT_LOCKED;
6803 }
6804 else
6805 {
6806 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6807 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6808 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6809 if (rcStrict != VINF_SUCCESS)
6810 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6811 }
6812
6813 void * const pvMem = pbMem;
6814
6815 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6816 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6817 if (fAccess & IEM_ACCESS_TYPE_READ)
6818 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6819
6820#else /* !IEM_WITH_DATA_TLB */
6821
6822 RTGCPHYS GCPhysFirst;
6823 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6824 if (rcStrict != VINF_SUCCESS)
6825 return rcStrict;
6826
6827 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6828 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6829 if (fAccess & IEM_ACCESS_TYPE_READ)
6830 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6831
6832 void *pvMem;
6833 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6834 if (rcStrict != VINF_SUCCESS)
6835 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6836
6837#endif /* !IEM_WITH_DATA_TLB */
6838
6839 /*
6840 * Fill in the mapping table entry.
6841 */
6842 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6843 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6844 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6845 pVCpu->iem.s.cActiveMappings += 1;
6846
6847 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6848 *ppvMem = pvMem;
6849 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6850 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6851 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6852
6853 return VINF_SUCCESS;
6854}
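#if 0 /* Illustrative sketch, not built: typical iemMemMap usage for a 4 byte store.
 * iemExampleStoreU32 is a hypothetical helper mirroring what the real data store
 * helpers do: map with a natural alignment mask (size - 1, no #GP/#AC flags),
 * write through the returned pointer and commit. */
static VBOXSTRICTRC iemExampleStoreU32(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue)
{
    uint32_t    *pu32Dst    = NULL;
    uint8_t      bUnmapInfo = 0;
    VBOXSTRICTRC rcStrict   = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst), X86_SREG_DS, GCPtrMem,
                                        IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = uValue;
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rcStrict;
}
#endif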
6855
6856
6857/**
6858 * Commits the guest memory if bounce buffered and unmaps it.
6859 *
6860 * @returns Strict VBox status code.
6861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6862 * @param bUnmapInfo Unmap info set by iemMemMap.
6863 */
6864VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6865{
6866 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6867 AssertMsgReturn( (bUnmapInfo & 0x08)
6868 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6869 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6870 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6871 VERR_NOT_FOUND);
6872
6873 /* If it's bounce buffered, we may need to write back the buffer. */
6874 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6875 {
6876 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6877 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6878 }
6879 /* Otherwise unlock it. */
6880 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6881 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6882
6883 /* Free the entry. */
6884 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6885 Assert(pVCpu->iem.s.cActiveMappings != 0);
6886 pVCpu->iem.s.cActiveMappings--;
6887 return VINF_SUCCESS;
6888}
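#if 0 /* Illustrative sketch, not built: the layout of the unmap info byte.
 * iemExampleDecodeUnmapInfo is a hypothetical helper; bits 2:0 hold the mapping
 * table index, bit 3 is the validity marker (0x08) and bits 7:4 carry the
 * IEM_ACCESS_TYPE_XXX bits the mapping was created with. */
static void iemExampleDecodeUnmapInfo(uint8_t bUnmapInfo, unsigned *piMemMap, uint32_t *pfAccessType)
{
    Assert(bUnmapInfo & 0x08);
    *piMemMap     = bUnmapInfo & 0x7;
    *pfAccessType = (uint32_t)bUnmapInfo >> 4; /* already limited to IEM_ACCESS_TYPE_MASK (<= 0xf) */
}
#endif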
6889
6890
6891/**
6892 * Rolls back the guest memory (conceptually only) and unmaps it.
6893 *
6894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6895 * @param bUnmapInfo Unmap info set by iemMemMap.
6896 */
6897void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6898{
6899 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6900 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6901 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6902 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6903 == ((unsigned)bUnmapInfo >> 4),
6904 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6905
6906 /* Unlock it if necessary. */
6907 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6908 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6909
6910 /* Free the entry. */
6911 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6912 Assert(pVCpu->iem.s.cActiveMappings != 0);
6913 pVCpu->iem.s.cActiveMappings--;
6914}
6915
6916#ifdef IEM_WITH_SETJMP
6917
6918/**
6919 * Maps the specified guest memory for the given kind of access, longjmp on
6920 * error.
6921 *
6922 * This may be using bounce buffering of the memory if it's crossing a page
6923 * boundary or if there is an access handler installed for any of it. Because
6924 * of lock prefix guarantees, we're in for some extra clutter when this
6925 * happens.
6926 *
6927 * This may raise a \#GP, \#SS, \#PF or \#AC.
6928 *
6929 * @returns Pointer to the mapped memory.
6930 *
6931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6932 * @param pbUnmapInfo Where to return unmap info to be passed to
6933 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6934 * iemMemCommitAndUnmapWoSafeJmp,
6935 * iemMemCommitAndUnmapRoSafeJmp,
6936 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6937 * when done.
6938 * @param cbMem The number of bytes to map. This is usually 1,
6939 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6940 * string operations it can be up to a page.
6941 * @param iSegReg The index of the segment register to use for
6942 * this access. The base and limits are checked.
6943 * Use UINT8_MAX to indicate that no segmentation
6944 * is required (for IDT, GDT and LDT accesses).
6945 * @param GCPtrMem The address of the guest memory.
6946 * @param fAccess How the memory is being accessed. The
6947 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6948 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6949 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6950 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6951 * set.
6952 * @param uAlignCtl Alignment control:
6953 * - Bits 15:0 is the alignment mask.
6954 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6955 * IEM_MEMMAP_F_ALIGN_SSE, and
6956 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6957 * Pass zero to skip alignment.
6958 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
6959 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
6960 * needs counting as such in the statistics.
6961 */
6962template<bool a_fSafeCall = false>
6963static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6964 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6965{
6966 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
6967
6968 /*
6969 * Check the input, check segment access and adjust address
6970 * with segment base.
6971 */
6972 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6973 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6974 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6975
6976 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6977 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6978 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6979
6980 /*
6981 * Alignment check.
6982 */
6983 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6984 { /* likelyish */ }
6985 else
6986 {
6987 /* Misaligned access. */
6988 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6989 {
6990 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6991 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6992 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6993 {
6994 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6995
6996 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6997 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6998 }
6999 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7000 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7001 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7002 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7003 * that's what FXSAVE does on a 10980xe. */
7004 && iemMemAreAlignmentChecksEnabled(pVCpu))
7005 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7006 else
7007 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7008 }
7009
7010#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7011 /* If the access is atomic there are host platform alignment restrictions
7012 we need to conform with. */
7013 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7014# if defined(RT_ARCH_AMD64)
7015 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7016# elif defined(RT_ARCH_ARM64)
7017 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7018# else
7019# error port me
7020# endif
7021 )
7022 { /* okay */ }
7023 else
7024 {
7025 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7026 pVCpu->iem.s.cMisalignedAtomics += 1;
7027 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7028 }
7029#endif
7030 }
7031
7032 /*
7033 * Figure out which mapping entry to use.
7034 */
7035 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7036 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7037 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7038 {
7039 iMemMap = iemMemMapFindFree(pVCpu);
7040 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7041 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7042 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7043 pVCpu->iem.s.aMemMappings[2].fAccess),
7044 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7045 }
7046
7047 /*
7048 * Crossing a page boundary?
7049 */
7050 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7051 { /* No (likely). */ }
7052 else
7053 {
7054 void *pvMem;
7055 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7056 if (rcStrict == VINF_SUCCESS)
7057 return pvMem;
7058 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7059 }
7060
7061#ifdef IEM_WITH_DATA_TLB
7062 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7063
7064 /*
7065 * Get the TLB entry for this page checking that it has the A & D bits
7066 * set as per fAccess flags.
7067 */
7068 /** @todo make the caller pass these in with fAccess. */
7069 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7070 ? IEMTLBE_F_PT_NO_USER : 0;
7071 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7072 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7073 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7074 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7075 ? IEMTLBE_F_PT_NO_WRITE : 0)
7076 : 0;
7077 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7078 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7079 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7080 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7081 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7082 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7083 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7084 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7085 {
7086# ifdef IEM_WITH_TLB_STATISTICS
7087 if (a_fSafeCall)
7088 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7089 else
7090 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7091# endif
7092 }
7093 else
7094 {
7095 if (a_fSafeCall)
7096 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7097 else
7098 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7099
7100 /* This page table walking will set A and D bits as required by the
7101 access while performing the walk.
7102 ASSUMES these are set when the address is translated rather than on commit... */
7103 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7104 PGMPTWALKFAST WalkFast;
7105 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7106 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7107 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7108 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7109 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7110 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7111 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7112 fQPage |= PGMQPAGE_F_USER_MODE;
7113 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7114 if (RT_SUCCESS(rc))
7115 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7116 else
7117 {
7118 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7119# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7120 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7121 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7122# endif
7123 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7124 }
7125
7126 uint32_t fDataBps;
7127 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7128 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7129 {
7130 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7131 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7132 {
7133 pTlbe--;
7134 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7135 }
7136 else
7137 {
7138 if (a_fSafeCall)
7139 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7140 else
7141 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7142 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7143 }
7144 }
7145 else
7146 {
7147 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7148 to the page with the data access breakpoint armed on it to pass thru here. */
7149 if (fDataBps > 1)
7150 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7151 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7152 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7153 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7154 pTlbe->uTag = uTagNoRev;
7155 }
7156 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7157 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7158 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7159 pTlbe->GCPhys = GCPhysPg;
7160 pTlbe->pbMappingR3 = NULL;
7161 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7162 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7163 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7164
7165 /* Resolve the physical address. */
7166 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7167 uint8_t *pbMemFullLoad = NULL;
7168 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7169 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7170 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7171# ifdef IN_RING3
7172 pTlbe->pbMappingR3 = pbMemFullLoad;
7173# endif
7174 }
7175
7176 /*
7177 * Check the flags and physical revision.
7178 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7179 * just to keep the code structure simple (i.e. avoid gotos or similar).
7180 */
7181 uint8_t *pbMem;
7182 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7183 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7184# ifdef IN_RING3
7185 pbMem = pTlbe->pbMappingR3;
7186# else
7187 pbMem = NULL;
7188# endif
7189 else
7190 {
7191 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7192
7193 /*
7194 * Okay, something isn't quite right or needs refreshing.
7195 */
7196 /* Write to read only memory? */
7197 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7198 {
7199 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7200# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7201/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7202 * to trigger an \#PG or a VM nested paging exit here yet! */
7203 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7204 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7205# endif
7206 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7207 }
7208
7209 /* Kernel memory accessed by userland? */
7210 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7211 {
7212 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7213# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7214/** @todo TLB: See above. */
7215 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7216 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7217# endif
7218 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7219 }
7220
7221 /*
7222 * Check if the physical page info needs updating.
7223 */
7224 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7225# ifdef IN_RING3
7226 pbMem = pTlbe->pbMappingR3;
7227# else
7228 pbMem = NULL;
7229# endif
7230 else
7231 {
7232 pTlbe->pbMappingR3 = NULL;
7233 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7234 pbMem = NULL;
7235 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7236 &pbMem, &pTlbe->fFlagsAndPhysRev);
7237 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7238# ifdef IN_RING3
7239 pTlbe->pbMappingR3 = pbMem;
7240# endif
7241 }
7242
7243 /*
7244 * Check the physical page level access and mapping.
7245 */
7246 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7247 { /* probably likely */ }
7248 else
7249 {
7250 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7251 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7252 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7253 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7254 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7255 if (rcStrict == VINF_SUCCESS)
7256 return pbMem;
7257 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7258 }
7259 }
7260 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7261
7262 if (pbMem)
7263 {
7264 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7265 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7266 fAccess |= IEM_ACCESS_NOT_LOCKED;
7267 }
7268 else
7269 {
7270 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7271 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7272 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7273 if (rcStrict == VINF_SUCCESS)
7274 {
7275 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7276 return pbMem;
7277 }
7278 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7279 }
7280
7281 void * const pvMem = pbMem;
7282
7283 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7284 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7285 if (fAccess & IEM_ACCESS_TYPE_READ)
7286 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7287
7288#else /* !IEM_WITH_DATA_TLB */
7289
7290
7291 RTGCPHYS GCPhysFirst;
7292 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7293 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7294 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7295
7296 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7297 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7298 if (fAccess & IEM_ACCESS_TYPE_READ)
7299 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7300
7301 void *pvMem;
7302 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7303 if (rcStrict == VINF_SUCCESS)
7304 { /* likely */ }
7305 else
7306 {
7307 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7308 if (rcStrict == VINF_SUCCESS)
7309 return pvMem;
7310 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7311 }
7312
7313#endif /* !IEM_WITH_DATA_TLB */
7314
7315 /*
7316 * Fill in the mapping table entry.
7317 */
7318 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7319 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7320 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7321 pVCpu->iem.s.cActiveMappings++;
7322
7323 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7324
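    /* bUnmapInfo layout (derived from the checks in iemMemCommitAndUnmapJmp): bits 0-2 hold the
       mapping table index, bit 3 (0x08) marks the info as in use, and bits 4-7 carry the
       IEM_ACCESS_TYPE_* bits, which are cross-checked again when unmapping. */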
7325 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7326 return pvMem;
7327}
7328
7329
7330/** @see iemMemMapJmp */
7331static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7332 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7333{
7334 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7335}
7336
7337
7338/**
7339 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7340 *
7341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7342 * @param   bUnmapInfo          Unmap info set by the iemMemMap*Jmp function
7343 *                              that created the mapping.
7344 */
7345void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7346{
7347 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7348 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7349 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7350 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7351 == ((unsigned)bUnmapInfo >> 4),
7352 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7353
7354 /* If it's bounce buffered, we may need to write back the buffer. */
7355 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7356 {
7357 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7358 {
7359 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7360 if (rcStrict == VINF_SUCCESS)
7361 return;
7362 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7363 }
7364 }
7365 /* Otherwise unlock it. */
7366 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7367 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7368
7369 /* Free the entry. */
7370 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7371 Assert(pVCpu->iem.s.cActiveMappings != 0);
7372 pVCpu->iem.s.cActiveMappings--;
7373}
7374
7375
7376/** Fallback for iemMemCommitAndUnmapRwJmp. */
7377void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7378{
7379 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7380 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7381}
7382
7383
7384/** Fallback for iemMemCommitAndUnmapAtJmp. */
7385void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7386{
7387 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7388 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7389}
7390
7391
7392/** Fallback for iemMemCommitAndUnmapWoJmp. */
7393void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7394{
7395 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7396 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7397}
7398
7399
7400/** Fallback for iemMemCommitAndUnmapRoJmp. */
7401void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7402{
7403 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7404 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7405}
7406
7407
7408/** Fallback for iemMemRollbackAndUnmapWo. */
7409void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7410{
7411 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7412 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7413}
7414
7415#endif /* IEM_WITH_SETJMP */
7416
7417#ifndef IN_RING3
7418/**
7419 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7420 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
7421 *
7422 * Allows the instruction to be completed and retired, while the IEM user will
7423 * return to ring-3 immediately afterwards and do the postponed writes there.
7424 *
7425 * @returns VBox status code (no strict statuses). Caller must check
7426 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7428 * @param   bUnmapInfo          Unmap info set by the iemMemMap call that
7429 *                              created the mapping.
7430 */
7431VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7432{
7433 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7434 AssertMsgReturn( (bUnmapInfo & 0x08)
7435 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7436 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7437 == ((unsigned)bUnmapInfo >> 4),
7438 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7439 VERR_NOT_FOUND);
7440
7441 /* If it's bounce buffered, we may need to write back the buffer. */
7442 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7443 {
7444 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7445 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7446 }
7447 /* Otherwise unlock it. */
7448 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7449 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7450
7451 /* Free the entry. */
7452 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7453 Assert(pVCpu->iem.s.cActiveMappings != 0);
7454 pVCpu->iem.s.cActiveMappings--;
7455 return VINF_SUCCESS;
7456}
7457#endif
7458
7459
7460/**
7461 * Rolls back mappings, releasing page locks and such.
7462 *
7463 * The caller shall only call this after checking cActiveMappings.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 */
7467void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7468{
7469 Assert(pVCpu->iem.s.cActiveMappings > 0);
7470
7471 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7472 while (iMemMap-- > 0)
7473 {
7474 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7475 if (fAccess != IEM_ACCESS_INVALID)
7476 {
7477 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7478 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7479 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7480 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7481 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7482 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7483 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7485 pVCpu->iem.s.cActiveMappings--;
7486 }
7487 }
7488}
7489
7490
7491/*
7492 * Instantiate R/W templates.
7493 */
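/*
 * Each block below parameterizes IEMAllMemRWTmpl.cpp.h: TMPL_MEM_TYPE gives the C type,
 * TMPL_MEM_FN_SUFF the suffix for the generated function names, TMPL_MEM_FMT_TYPE and
 * TMPL_MEM_FMT_DESC the logging format and description, TMPL_MEM_TYPE_ALIGN the alignment
 * mask, and TMPL_MEM_MAP_FLAGS_ADD extra IEM_MEMMAP_F_* flags for the mapping call, while
 * TMPL_MEM_WITH_STACK and TMPL_WITH_PUSH_SREG (going by their names) request the stack
 * push/pop variants.  As a rough sketch only (the real expansions live in the template
 * header), the uint16_t instantiation yields accessors of the shape used later in this
 * file:
 *
 * @code
 *      VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
 *      VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value);
 * @endcode
 */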
7494#define TMPL_MEM_WITH_STACK
7495
7496#define TMPL_MEM_TYPE uint8_t
7497#define TMPL_MEM_FN_SUFF U8
7498#define TMPL_MEM_FMT_TYPE "%#04x"
7499#define TMPL_MEM_FMT_DESC "byte"
7500#include "IEMAllMemRWTmpl.cpp.h"
7501
7502#define TMPL_MEM_TYPE uint16_t
7503#define TMPL_MEM_FN_SUFF U16
7504#define TMPL_MEM_FMT_TYPE "%#06x"
7505#define TMPL_MEM_FMT_DESC "word"
7506#include "IEMAllMemRWTmpl.cpp.h"
7507
7508#define TMPL_WITH_PUSH_SREG
7509#define TMPL_MEM_TYPE uint32_t
7510#define TMPL_MEM_FN_SUFF U32
7511#define TMPL_MEM_FMT_TYPE "%#010x"
7512#define TMPL_MEM_FMT_DESC "dword"
7513#include "IEMAllMemRWTmpl.cpp.h"
7514#undef TMPL_WITH_PUSH_SREG
7515
7516#define TMPL_MEM_TYPE uint64_t
7517#define TMPL_MEM_FN_SUFF U64
7518#define TMPL_MEM_FMT_TYPE "%#018RX64"
7519#define TMPL_MEM_FMT_DESC "qword"
7520#include "IEMAllMemRWTmpl.cpp.h"
7521
7522#undef TMPL_MEM_WITH_STACK
7523
7524#define TMPL_MEM_TYPE uint64_t
7525#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7526#define TMPL_MEM_FN_SUFF U64AlignedU128
7527#define TMPL_MEM_FMT_TYPE "%#018RX64"
7528#define TMPL_MEM_FMT_DESC "qword"
7529#include "IEMAllMemRWTmpl.cpp.h"
7530
7531/* See IEMAllMemRWTmplInline.cpp.h */
7532#define TMPL_MEM_BY_REF
7533
7534#define TMPL_MEM_TYPE RTFLOAT80U
7535#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7536#define TMPL_MEM_FN_SUFF R80
7537#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7538#define TMPL_MEM_FMT_DESC "tword"
7539#include "IEMAllMemRWTmpl.cpp.h"
7540
7541#define TMPL_MEM_TYPE RTPBCD80U
7542#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7543#define TMPL_MEM_FN_SUFF D80
7544#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7545#define TMPL_MEM_FMT_DESC "tword"
7546#include "IEMAllMemRWTmpl.cpp.h"
7547
7548#define TMPL_MEM_TYPE RTUINT128U
7549#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7550#define TMPL_MEM_FN_SUFF U128
7551#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7552#define TMPL_MEM_FMT_DESC "dqword"
7553#include "IEMAllMemRWTmpl.cpp.h"
7554
7555#define TMPL_MEM_TYPE RTUINT128U
7556#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7557#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7558#define TMPL_MEM_FN_SUFF U128AlignedSse
7559#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7560#define TMPL_MEM_FMT_DESC "dqword"
7561#include "IEMAllMemRWTmpl.cpp.h"
7562
7563#define TMPL_MEM_TYPE RTUINT128U
7564#define TMPL_MEM_TYPE_ALIGN 0
7565#define TMPL_MEM_FN_SUFF U128NoAc
7566#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7567#define TMPL_MEM_FMT_DESC "dqword"
7568#include "IEMAllMemRWTmpl.cpp.h"
7569
7570#define TMPL_MEM_TYPE RTUINT256U
7571#define TMPL_MEM_TYPE_ALIGN 0
7572#define TMPL_MEM_FN_SUFF U256NoAc
7573#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7574#define TMPL_MEM_FMT_DESC "qqword"
7575#include "IEMAllMemRWTmpl.cpp.h"
7576
7577#define TMPL_MEM_TYPE RTUINT256U
7578#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7579#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7580#define TMPL_MEM_FN_SUFF U256AlignedAvx
7581#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7582#define TMPL_MEM_FMT_DESC "qqword"
7583#include "IEMAllMemRWTmpl.cpp.h"
7584
7585/**
7586 * Fetches a data dword and zero extends it to a qword.
7587 *
7588 * @returns Strict VBox status code.
7589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7590 * @param pu64Dst Where to return the qword.
7591 * @param iSegReg The index of the segment register to use for
7592 * this access. The base and limits are checked.
7593 * @param GCPtrMem The address of the guest memory.
7594 */
7595VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7596{
7597 /* The lazy approach for now... */
7598 uint8_t bUnmapInfo;
7599 uint32_t const *pu32Src;
7600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7601 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7602 if (rc == VINF_SUCCESS)
7603 {
7604 *pu64Dst = *pu32Src;
7605 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7606 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7607 }
7608 return rc;
7609}
7610
7611
7612#ifdef SOME_UNUSED_FUNCTION
7613/**
7614 * Fetches a data dword and sign extends it to a qword.
7615 *
7616 * @returns Strict VBox status code.
7617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7618 * @param pu64Dst Where to return the sign extended value.
7619 * @param iSegReg The index of the segment register to use for
7620 * this access. The base and limits are checked.
7621 * @param GCPtrMem The address of the guest memory.
7622 */
7623VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7624{
7625 /* The lazy approach for now... */
7626 uint8_t bUnmapInfo;
7627 int32_t const *pi32Src;
7628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7629 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7630 if (rc == VINF_SUCCESS)
7631 {
7632 *pu64Dst = *pi32Src;
7633 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7634 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7635 }
7636#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7637 else
7638 *pu64Dst = 0;
7639#endif
7640 return rc;
7641}
7642#endif
7643
7644
7645/**
7646 * Fetches a descriptor register (lgdt, lidt).
7647 *
7648 * @returns Strict VBox status code.
7649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7650 * @param pcbLimit Where to return the limit.
7651 * @param pGCPtrBase Where to return the base.
7652 * @param iSegReg The index of the segment register to use for
7653 * this access. The base and limits are checked.
7654 * @param GCPtrMem The address of the guest memory.
7655 * @param enmOpSize The effective operand size.
7656 */
7657VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7658 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7659{
7660 /*
7661 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7662 * little special:
7663 * - The two reads are done separately.
7664 *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7665 * - We suspect the 386 to actually commit the limit before the base in
7666 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7667 *        don't try to emulate this eccentric behavior, because it's not well
7668 * enough understood and rather hard to trigger.
7669 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7670 */
7671 VBOXSTRICTRC rcStrict;
7672 if (IEM_IS_64BIT_CODE(pVCpu))
7673 {
7674 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7675 if (rcStrict == VINF_SUCCESS)
7676 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7677 }
7678 else
7679 {
7680 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7681 if (enmOpSize == IEMMODE_32BIT)
7682 {
7683 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7684 {
7685 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7686 if (rcStrict == VINF_SUCCESS)
7687 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7688 }
7689 else
7690 {
7691 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7692 if (rcStrict == VINF_SUCCESS)
7693 {
7694 *pcbLimit = (uint16_t)uTmp;
7695 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7696 }
7697 }
7698 if (rcStrict == VINF_SUCCESS)
7699 *pGCPtrBase = uTmp;
7700 }
7701 else
7702 {
7703 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7704 if (rcStrict == VINF_SUCCESS)
7705 {
7706 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7707 if (rcStrict == VINF_SUCCESS)
7708 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7709 }
7710 }
7711 }
7712 return rcStrict;
7713}
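
/*
 * For reference only: the memory operand decoded by iemMemFetchDataXdtr above has the
 * classic pseudo-descriptor layout (no such structure is actually used by this code):
 *      offset 0:  16-bit limit
 *      offset 2:  32-bit base (only the low 24 bits are used with a 16-bit operand size),
 *                 or a 64-bit base in 64-bit code
 */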
7714
7715
7716/**
7717 * Stores a data dqword, SSE aligned.
7718 *
7719 * @returns Strict VBox status code.
7720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7721 * @param iSegReg The index of the segment register to use for
7722 * this access. The base and limits are checked.
7723 * @param GCPtrMem The address of the guest memory.
7724 * @param u128Value The value to store.
7725 */
7726VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7727{
7728 /* The lazy approach for now... */
7729 uint8_t bUnmapInfo;
7730 PRTUINT128U pu128Dst;
7731 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7732 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7733 if (rc == VINF_SUCCESS)
7734 {
7735 pu128Dst->au64[0] = u128Value.au64[0];
7736 pu128Dst->au64[1] = u128Value.au64[1];
7737 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7738 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7739 }
7740 return rc;
7741}
7742
7743
7744#ifdef IEM_WITH_SETJMP
7745/**
7746 * Stores a data dqword, SSE aligned.
7747 *
7748 * Longjmps with a strict status code on error instead of returning one.
7749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7750 * @param iSegReg The index of the segment register to use for
7751 * this access. The base and limits are checked.
7752 * @param GCPtrMem The address of the guest memory.
7753 * @param u128Value The value to store.
7754 */
7755void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7756 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7757{
7758 /* The lazy approach for now... */
7759 uint8_t bUnmapInfo;
7760 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7761 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7762 pu128Dst->au64[0] = u128Value.au64[0];
7763 pu128Dst->au64[1] = u128Value.au64[1];
7764 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7765 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7766}
7767#endif
7768
7769
7770/**
7771 * Stores a data qqword.
7772 *
7773 * @returns Strict VBox status code.
7774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7775 * @param iSegReg The index of the segment register to use for
7776 * this access. The base and limits are checked.
7777 * @param GCPtrMem The address of the guest memory.
7778 * @param pu256Value Pointer to the value to store.
7779 */
7780VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7781{
7782 /* The lazy approach for now... */
7783 uint8_t bUnmapInfo;
7784 PRTUINT256U pu256Dst;
7785 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7786 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7787 if (rc == VINF_SUCCESS)
7788 {
7789 pu256Dst->au64[0] = pu256Value->au64[0];
7790 pu256Dst->au64[1] = pu256Value->au64[1];
7791 pu256Dst->au64[2] = pu256Value->au64[2];
7792 pu256Dst->au64[3] = pu256Value->au64[3];
7793 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7794 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7795 }
7796 return rc;
7797}
7798
7799
7800#ifdef IEM_WITH_SETJMP
7801/**
7802 * Stores a data qqword, longjmp on error.
7803 *
7804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7805 * @param iSegReg The index of the segment register to use for
7806 * this access. The base and limits are checked.
7807 * @param GCPtrMem The address of the guest memory.
7808 * @param pu256Value Pointer to the value to store.
7809 */
7810void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7811{
7812 /* The lazy approach for now... */
7813 uint8_t bUnmapInfo;
7814 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7815 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7816 pu256Dst->au64[0] = pu256Value->au64[0];
7817 pu256Dst->au64[1] = pu256Value->au64[1];
7818 pu256Dst->au64[2] = pu256Value->au64[2];
7819 pu256Dst->au64[3] = pu256Value->au64[3];
7820 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7821 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7822}
7823#endif
7824
7825
7826/**
7827 * Stores a descriptor register (sgdt, sidt).
7828 *
7829 * @returns Strict VBox status code.
7830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7831 * @param cbLimit The limit.
7832 * @param GCPtrBase The base address.
7833 * @param iSegReg The index of the segment register to use for
7834 * this access. The base and limits are checked.
7835 * @param GCPtrMem The address of the guest memory.
7836 */
7837VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7838{
7839 /*
7840     * The SIDT and SGDT instructions actually store the data using two
7841     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
7842     * do not respond to opsize prefixes.
7843 */
7844 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7845 if (rcStrict == VINF_SUCCESS)
7846 {
7847 if (IEM_IS_16BIT_CODE(pVCpu))
7848 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7849 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7850 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7851 else if (IEM_IS_32BIT_CODE(pVCpu))
7852 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7853 else
7854 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7855 }
7856 return rcStrict;
7857}
7858
7859
7860/**
7861 * Begin a special stack push (used by interrupt, exceptions and such).
7862 *
7863 * This will raise \#SS or \#PF if appropriate.
7864 *
7865 * @returns Strict VBox status code.
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param cbMem The number of bytes to push onto the stack.
7868 * @param cbAlign The alignment mask (7, 3, 1).
7869 * @param ppvMem Where to return the pointer to the stack memory.
7870 * As with the other memory functions this could be
7871 * direct access or bounce buffered access, so
7872 *                      don't commit any register changes until the commit call
7873 * succeeds.
7874 * @param pbUnmapInfo Where to store unmap info for
7875 * iemMemStackPushCommitSpecial.
7876 * @param puNewRsp Where to return the new RSP value. This must be
7877 * passed unchanged to
7878 * iemMemStackPushCommitSpecial().
7879 */
7880VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7881 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7882{
7883 Assert(cbMem < UINT8_MAX);
7884 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7885 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7886}
7887
7888
7889/**
7890 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7891 *
7892 * This will update the rSP.
7893 *
7894 * @returns Strict VBox status code.
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7897 * @param uNewRsp The new RSP value returned by
7898 * iemMemStackPushBeginSpecial().
7899 */
7900VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7901{
7902 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7903 if (rcStrict == VINF_SUCCESS)
7904 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7905 return rcStrict;
7906}
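
/*
 * A minimal usage sketch for the push helpers above (the 8 byte frame, the 7 byte
 * alignment mask and uValue are made up for illustration):
 *
 * @code
 *      void    *pvFrame;
 *      uint8_t  bUnmapInfo;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvFrame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvFrame = uValue;                                       // fill in the frame
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits + updates RSP
 *      }
 * @endcode
 */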
7907
7908
7909/**
7910 * Begin a special stack pop (used by iret, retf and such).
7911 *
7912 * This will raise \#SS or \#PF if appropriate.
7913 *
7914 * @returns Strict VBox status code.
7915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7916 * @param cbMem The number of bytes to pop from the stack.
7917 * @param cbAlign The alignment mask (7, 3, 1).
7918 * @param ppvMem Where to return the pointer to the stack memory.
7919 * @param pbUnmapInfo Where to store unmap info for
7920 * iemMemStackPopDoneSpecial.
7921 * @param puNewRsp Where to return the new RSP value. This must be
7922 * assigned to CPUMCTX::rsp manually some time
7923 * after iemMemStackPopDoneSpecial() has been
7924 * called.
7925 */
7926VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7927 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7928{
7929 Assert(cbMem < UINT8_MAX);
7930 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7931 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7932}
7933
7934
7935/**
7936 * Continue a special stack pop (used by iret and retf), for the purpose of
7937 * retrieving a new stack pointer.
7938 *
7939 * This will raise \#SS or \#PF if appropriate.
7940 *
7941 * @returns Strict VBox status code.
7942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7943 * @param off Offset from the top of the stack. This is zero
7944 * except in the retf case.
7945 * @param cbMem The number of bytes to pop from the stack.
7946 * @param ppvMem Where to return the pointer to the stack memory.
7947 * @param pbUnmapInfo Where to store unmap info for
7948 * iemMemStackPopDoneSpecial.
7949 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7950 * return this because all use of this function is
7951 * to retrieve a new value and anything we return
7952 * here would be discarded.)
7953 */
7954VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7955 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7956{
7957 Assert(cbMem < UINT8_MAX);
7958
7959     /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7960 RTGCPTR GCPtrTop;
7961 if (IEM_IS_64BIT_CODE(pVCpu))
7962 GCPtrTop = uCurNewRsp;
7963 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7964 GCPtrTop = (uint32_t)uCurNewRsp;
7965 else
7966 GCPtrTop = (uint16_t)uCurNewRsp;
7967
7968 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7969 0 /* checked in iemMemStackPopBeginSpecial */);
7970}
7971
7972
7973/**
7974 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7975 * iemMemStackPopContinueSpecial).
7976 *
7977 * The caller will manually commit the rSP.
7978 *
7979 * @returns Strict VBox status code.
7980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7981 * @param bUnmapInfo Unmap information returned by
7982 * iemMemStackPopBeginSpecial() or
7983 * iemMemStackPopContinueSpecial().
7984 */
7985VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7986{
7987 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7988}
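
/*
 * The pop helpers follow the same begin/done pattern, except that the new RSP is committed
 * by the caller afterwards.  Rough sketch only (the 8 byte pop is illustrative):
 *
 * @code
 *      void const *pvFrame;
 *      uint8_t     bUnmapInfo;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvFrame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uPopped = *(uint64_t const *)pvFrame;
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;   // the manual RSP commit mentioned above
 *      }
 * @endcode
 */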
7989
7990
7991/**
7992 * Fetches a system table byte.
7993 *
7994 * @returns Strict VBox status code.
7995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7996 * @param pbDst Where to return the byte.
7997 * @param iSegReg The index of the segment register to use for
7998 * this access. The base and limits are checked.
7999 * @param GCPtrMem The address of the guest memory.
8000 */
8001VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8002{
8003 /* The lazy approach for now... */
8004 uint8_t bUnmapInfo;
8005 uint8_t const *pbSrc;
8006 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8007 if (rc == VINF_SUCCESS)
8008 {
8009 *pbDst = *pbSrc;
8010 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8011 }
8012 return rc;
8013}
8014
8015
8016/**
8017 * Fetches a system table word.
8018 *
8019 * @returns Strict VBox status code.
8020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8021 * @param pu16Dst Where to return the word.
8022 * @param iSegReg The index of the segment register to use for
8023 * this access. The base and limits are checked.
8024 * @param GCPtrMem The address of the guest memory.
8025 */
8026VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8027{
8028 /* The lazy approach for now... */
8029 uint8_t bUnmapInfo;
8030 uint16_t const *pu16Src;
8031 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8032 if (rc == VINF_SUCCESS)
8033 {
8034 *pu16Dst = *pu16Src;
8035 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8036 }
8037 return rc;
8038}
8039
8040
8041/**
8042 * Fetches a system table dword.
8043 *
8044 * @returns Strict VBox status code.
8045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8046 * @param pu32Dst Where to return the dword.
8047 * @param iSegReg The index of the segment register to use for
8048 * this access. The base and limits are checked.
8049 * @param GCPtrMem The address of the guest memory.
8050 */
8051VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8052{
8053 /* The lazy approach for now... */
8054 uint8_t bUnmapInfo;
8055 uint32_t const *pu32Src;
8056 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8057 if (rc == VINF_SUCCESS)
8058 {
8059 *pu32Dst = *pu32Src;
8060 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8061 }
8062 return rc;
8063}
8064
8065
8066/**
8067 * Fetches a system table qword.
8068 *
8069 * @returns Strict VBox status code.
8070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8071 * @param pu64Dst Where to return the qword.
8072 * @param iSegReg The index of the segment register to use for
8073 * this access. The base and limits are checked.
8074 * @param GCPtrMem The address of the guest memory.
8075 */
8076VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8077{
8078 /* The lazy approach for now... */
8079 uint8_t bUnmapInfo;
8080 uint64_t const *pu64Src;
8081 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8082 if (rc == VINF_SUCCESS)
8083 {
8084 *pu64Dst = *pu64Src;
8085 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8086 }
8087 return rc;
8088}
8089
8090
8091/**
8092 * Fetches a descriptor table entry with caller specified error code.
8093 *
8094 * @returns Strict VBox status code.
8095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8096 * @param pDesc Where to return the descriptor table entry.
8097 * @param uSel The selector which table entry to fetch.
8098 * @param uXcpt The exception to raise on table lookup error.
8099 * @param uErrorCode The error code associated with the exception.
8100 */
8101static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8102 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8103{
8104 AssertPtr(pDesc);
8105 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8106
8107 /** @todo did the 286 require all 8 bytes to be accessible? */
8108 /*
8109 * Get the selector table base and check bounds.
8110 */
8111 RTGCPTR GCPtrBase;
8112 if (uSel & X86_SEL_LDT)
8113 {
8114 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8115 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8116 {
8117 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8118 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8119 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8120 uErrorCode, 0);
8121 }
8122
8123 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8124 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8125 }
8126 else
8127 {
8128 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8129 {
8130 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8131 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8132 uErrorCode, 0);
8133 }
8134 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8135 }
8136
8137 /*
8138 * Read the legacy descriptor and maybe the long mode extensions if
8139 * required.
8140 */
8141 VBOXSTRICTRC rcStrict;
8142 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8143 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8144 else
8145 {
8146 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8147 if (rcStrict == VINF_SUCCESS)
8148 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8149 if (rcStrict == VINF_SUCCESS)
8150 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8151 if (rcStrict == VINF_SUCCESS)
8152 pDesc->Legacy.au16[3] = 0;
8153 else
8154 return rcStrict;
8155 }
8156
8157 if (rcStrict == VINF_SUCCESS)
8158 {
8159 if ( !IEM_IS_LONG_MODE(pVCpu)
8160 || pDesc->Legacy.Gen.u1DescType)
8161 pDesc->Long.au64[1] = 0;
8162 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8163 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8164 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8165 else
8166 {
8167 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8168 /** @todo is this the right exception? */
8169 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8170 }
8171 }
8172 return rcStrict;
8173}
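
/*
 * Side note on the long mode read above: (uSel | X86_SEL_RPL_LDT) + 1 rounds the selector
 * up to the next 8 byte boundary, i.e. it addresses the high half of the 16 byte system
 * descriptor at (uSel & X86_SEL_MASK) + 8.
 */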
8174
8175
8176/**
8177 * Fetches a descriptor table entry.
8178 *
8179 * @returns Strict VBox status code.
8180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8181 * @param pDesc Where to return the descriptor table entry.
8182 * @param uSel The selector which table entry to fetch.
8183 * @param uXcpt The exception to raise on table lookup error.
8184 */
8185VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8186{
8187 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8188}
8189
8190
8191/**
8192 * Marks the selector descriptor as accessed (only non-system descriptors).
8193 *
8194 * This function ASSUMES that iemMemFetchSelDesc has be called previously and
8195 * will therefore skip the limit checks.
8196 *
8197 * @returns Strict VBox status code.
8198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8199 * @param uSel The selector.
8200 */
8201VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8202{
8203 /*
8204 * Get the selector table base and calculate the entry address.
8205 */
8206 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8207 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8208 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8209 GCPtr += uSel & X86_SEL_MASK;
8210
8211 /*
8212 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8213 * ugly stuff to avoid this. This will make sure it's an atomic access
8214     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8215 */
8216 VBOXSTRICTRC rcStrict;
8217 uint8_t bUnmapInfo;
8218 uint32_t volatile *pu32;
8219 if ((GCPtr & 3) == 0)
8220 {
8221 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8222 GCPtr += 2 + 2;
8223 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8224 if (rcStrict != VINF_SUCCESS)
8225 return rcStrict;
8226         ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8227 }
8228 else
8229 {
8230 /* The misaligned GDT/LDT case, map the whole thing. */
8231 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8232 if (rcStrict != VINF_SUCCESS)
8233 return rcStrict;
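        /* The accessed bit is bit 0 of the type byte at descriptor offset 5, i.e. bit 40 of
           the 8 descriptor bytes; the byte and bit offsets below adjust for however the
           returned mapping pointer happens to be (mis)aligned. */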
8234 switch ((uintptr_t)pu32 & 3)
8235 {
8236 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8237 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8238 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8239 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8240 }
8241 }
8242
8243 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8244}
8245
8246
8247#undef LOG_GROUP
8248#define LOG_GROUP LOG_GROUP_IEM
8249
8250/** @} */
8251
8252/** @name Opcode Helpers.
8253 * @{
8254 */
8255
8256/**
8257 * Calculates the effective address of a ModR/M memory operand.
8258 *
8259 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8260 *
8261 * @return Strict VBox status code.
8262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8263 * @param bRm The ModRM byte.
8264 * @param cbImmAndRspOffset - First byte: The size of any immediate
8265 * following the effective address opcode bytes
8266 * (only for RIP relative addressing).
8267 * - Second byte: RSP displacement (for POP [ESP]).
8268 * @param pGCPtrEff Where to return the effective address.
8269 */
8270VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8271{
8272 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8273# define SET_SS_DEF() \
8274 do \
8275 { \
8276 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8277 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8278 } while (0)
8279
8280 if (!IEM_IS_64BIT_CODE(pVCpu))
8281 {
8282/** @todo Check the effective address size crap! */
8283 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8284 {
8285 uint16_t u16EffAddr;
8286
8287 /* Handle the disp16 form with no registers first. */
8288 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8289 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8290 else
8291 {
8292                 /* Get the displacement. */
8293 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8294 {
8295 case 0: u16EffAddr = 0; break;
8296 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8297 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8298 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8299 }
8300
8301 /* Add the base and index registers to the disp. */
8302 switch (bRm & X86_MODRM_RM_MASK)
8303 {
8304 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8305 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8306 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8307 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8308 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8309 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8310 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8311 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8312 }
8313 }
8314
8315 *pGCPtrEff = u16EffAddr;
8316 }
8317 else
8318 {
8319 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8320 uint32_t u32EffAddr;
8321
8322 /* Handle the disp32 form with no registers first. */
8323 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8324 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8325 else
8326 {
8327 /* Get the register (or SIB) value. */
8328 switch ((bRm & X86_MODRM_RM_MASK))
8329 {
8330 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8331 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8332 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8333 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8334 case 4: /* SIB */
8335 {
8336 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8337
8338 /* Get the index and scale it. */
8339 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8340 {
8341 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8342 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8343 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8344 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8345 case 4: u32EffAddr = 0; /*none */ break;
8346 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8347 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8348 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8349 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8350 }
8351 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8352
8353 /* add base */
8354 switch (bSib & X86_SIB_BASE_MASK)
8355 {
8356 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8357 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8358 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8359 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8360 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8361 case 5:
8362 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8363 {
8364 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8365 SET_SS_DEF();
8366 }
8367 else
8368 {
8369 uint32_t u32Disp;
8370 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8371 u32EffAddr += u32Disp;
8372 }
8373 break;
8374 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8375 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8376 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8377 }
8378 break;
8379 }
8380 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8381 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8382 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8383 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8384 }
8385
8386 /* Get and add the displacement. */
8387 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8388 {
8389 case 0:
8390 break;
8391 case 1:
8392 {
8393 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8394 u32EffAddr += i8Disp;
8395 break;
8396 }
8397 case 2:
8398 {
8399 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8400 u32EffAddr += u32Disp;
8401 break;
8402 }
8403 default:
8404 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8405 }
8406
8407 }
8408 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8409 *pGCPtrEff = u32EffAddr;
8410 }
8411 }
8412 else
8413 {
8414 uint64_t u64EffAddr;
8415
8416 /* Handle the rip+disp32 form with no registers first. */
8417 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8418 {
8419 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8420 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8421 }
8422 else
8423 {
8424 /* Get the register (or SIB) value. */
8425 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8426 {
8427 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8428 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8429 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8430 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8431 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8432 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8433 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8434 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8435 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8436 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8437 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8438 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8439 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8440 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8441 /* SIB */
8442 case 4:
8443 case 12:
8444 {
8445 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8446
8447 /* Get the index and scale it. */
8448 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8449 {
8450 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8451 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8452 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8453 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8454 case 4: u64EffAddr = 0; /*none */ break;
8455 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8456 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8457 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8458 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8459 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8460 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8461 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8462 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8463 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8464 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8465 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8466 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8467 }
8468 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8469
8470 /* add base */
8471 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8472 {
8473 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8474 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8475 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8476 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8477 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8478 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8479 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8480 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8481 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8482 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8483 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8484 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8485 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8486 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8487 /* complicated encodings */
8488 case 5:
8489 case 13:
8490 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8491 {
8492 if (!pVCpu->iem.s.uRexB)
8493 {
8494 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8495 SET_SS_DEF();
8496 }
8497 else
8498 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8499 }
8500 else
8501 {
8502 uint32_t u32Disp;
8503 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8504 u64EffAddr += (int32_t)u32Disp;
8505 }
8506 break;
8507 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8508 }
8509 break;
8510 }
8511 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8512 }
8513
8514 /* Get and add the displacement. */
8515 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8516 {
8517 case 0:
8518 break;
8519 case 1:
8520 {
8521 int8_t i8Disp;
8522 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8523 u64EffAddr += i8Disp;
8524 break;
8525 }
8526 case 2:
8527 {
8528 uint32_t u32Disp;
8529 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8530 u64EffAddr += (int32_t)u32Disp;
8531 break;
8532 }
8533 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8534 }
8535
8536 }
8537
8538 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8539 *pGCPtrEff = u64EffAddr;
8540 else
8541 {
8542 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8543 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8544 }
8545 }
8546
8547 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8548 return VINF_SUCCESS;
8549}
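
/*
 * Note on the cbImmAndRspOffset parameter of the function above: the low byte is the size
 * of any immediate following the effective address bytes (it only matters for the
 * RIP-relative form), while the second byte is added to xSP in the SIB base register case
 * so that POP [xSP]-style operands see the already incremented stack pointer.  A
 * hypothetical caller decoding an instruction with a 4 byte immediate would pass 4 in the
 * low byte; plain operands pass 0.
 */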
8550
8551
8552#ifdef IEM_WITH_SETJMP
8553/**
8554 * Calculates the effective address of a ModR/M memory operand.
8555 *
8556 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8557 *
8558 * May longjmp on internal error.
8559 *
8560 * @return The effective address.
8561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8562 * @param bRm The ModRM byte.
8563 * @param cbImmAndRspOffset - First byte: The size of any immediate
8564 * following the effective address opcode bytes
8565 * (only for RIP relative addressing).
8566 * - Second byte: RSP displacement (for POP [ESP]).
8567 */
8568RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8569{
8570 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8571# define SET_SS_DEF() \
8572 do \
8573 { \
8574 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8575 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8576 } while (0)
8577
8578 if (!IEM_IS_64BIT_CODE(pVCpu))
8579 {
8580/** @todo Check the effective address size crap! */
8581 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8582 {
8583 uint16_t u16EffAddr;
8584
8585 /* Handle the disp16 form with no registers first. */
8586 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8587 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8588 else
8589 {
8590             /* Get the displacement. */
8591 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8592 {
8593 case 0: u16EffAddr = 0; break;
8594 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8595 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8596 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8597 }
8598
8599 /* Add the base and index registers to the disp. */
8600 switch (bRm & X86_MODRM_RM_MASK)
8601 {
8602 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8603 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8604 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8605 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8606 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8607 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8608 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8609 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8610 }
8611 }
8612
8613 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8614 return u16EffAddr;
8615 }
8616
8617 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8618 uint32_t u32EffAddr;
8619
8620 /* Handle the disp32 form with no registers first. */
8621 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8622 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8623 else
8624 {
8625 /* Get the register (or SIB) value. */
8626 switch ((bRm & X86_MODRM_RM_MASK))
8627 {
8628 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8629 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8630 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8631 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8632 case 4: /* SIB */
8633 {
8634 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8635
8636 /* Get the index and scale it. */
8637 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8638 {
8639 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8640 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8641 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8642 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8643 case 4: u32EffAddr = 0; /*none */ break;
8644 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8645 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8646 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8647 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8648 }
8649 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8650
8651 /* add base */
8652 switch (bSib & X86_SIB_BASE_MASK)
8653 {
8654 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8655 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8656 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8657 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8658 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8659 case 5:
8660 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8661 {
8662 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8663 SET_SS_DEF();
8664 }
8665 else
8666 {
8667 uint32_t u32Disp;
8668 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8669 u32EffAddr += u32Disp;
8670 }
8671 break;
8672 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8673 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8674 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8675 }
8676 break;
8677 }
8678 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8679 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8680 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8681 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8682 }
8683
8684 /* Get and add the displacement. */
8685 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8686 {
8687 case 0:
8688 break;
8689 case 1:
8690 {
8691 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8692 u32EffAddr += i8Disp;
8693 break;
8694 }
8695 case 2:
8696 {
8697 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8698 u32EffAddr += u32Disp;
8699 break;
8700 }
8701 default:
8702 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8703 }
8704 }
8705
8706 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8707 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8708 return u32EffAddr;
8709 }
8710
8711 uint64_t u64EffAddr;
8712
8713 /* Handle the rip+disp32 form with no registers first. */
8714 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8715 {
8716 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8717 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8718 }
8719 else
8720 {
8721 /* Get the register (or SIB) value. */
8722 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8723 {
8724 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8725 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8726 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8727 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8728 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8729 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8730 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8731 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8732 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8733 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8734 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8735 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8736 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8737 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8738 /* SIB */
8739 case 4:
8740 case 12:
8741 {
8742 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8743
8744 /* Get the index and scale it. */
8745 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8746 {
8747 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8748 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8749 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8750 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8751 case 4: u64EffAddr = 0; /*none */ break;
8752 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8753 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8754 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8755 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8756 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8757 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8758 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8759 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8760 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8761 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8762 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8763 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8764 }
8765 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8766
8767 /* add base */
8768 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8769 {
8770 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8771 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8772 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8773 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8774 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8775 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8776 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8777 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8778 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8779 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8780 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8781 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8782 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8783 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8784 /* complicated encodings */
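      /* With mod=0 a base of 5/13 selects a 32-bit displacement and no base
         register; for mod=1/2 the base is rBP/r13 and the displacement is
         added by the outer mod switch further down. */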
8785 case 5:
8786 case 13:
8787 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8788 {
8789 if (!pVCpu->iem.s.uRexB)
8790 {
8791 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8792 SET_SS_DEF();
8793 }
8794 else
8795 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8796 }
8797 else
8798 {
8799 uint32_t u32Disp;
8800 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8801 u64EffAddr += (int32_t)u32Disp;
8802 }
8803 break;
8804 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8805 }
8806 break;
8807 }
8808 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8809 }
8810
8811 /* Get and add the displacement. */
8812 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8813 {
8814 case 0:
8815 break;
8816 case 1:
8817 {
8818 int8_t i8Disp;
8819 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8820 u64EffAddr += i8Disp;
8821 break;
8822 }
8823 case 2:
8824 {
8825 uint32_t u32Disp;
8826 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8827 u64EffAddr += (int32_t)u32Disp;
8828 break;
8829 }
8830 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8831 }
8832
8833 }
8834
8835 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8836 {
8837 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8838 return u64EffAddr;
8839 }
8840 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8841 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8842 return u64EffAddr & UINT32_MAX;
8843}
8844#endif /* IEM_WITH_SETJMP */
8845
8846
8847/**
8848 * Calculates the effective address of a ModR/M memory operand, extended version
8849 * for use in the recompilers.
8850 *
8851 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8852 *
8853 * @return Strict VBox status code.
8854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8855 * @param bRm The ModRM byte.
8856 * @param cbImmAndRspOffset - First byte: The size of any immediate
8857 * following the effective address opcode bytes
8858 * (only for RIP relative addressing).
8859 * - Second byte: RSP displacement (for POP [ESP]).
8860 * @param pGCPtrEff Where to return the effective address.
8861 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8862 * SIB byte (bits 39:32).
8863 */
8864VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8865{
8866 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8867# define SET_SS_DEF() \
8868 do \
8869 { \
8870 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8871 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8872 } while (0)
8873
8874 uint64_t uInfo;
8875 if (!IEM_IS_64BIT_CODE(pVCpu))
8876 {
8877/** @todo Check the effective address size crap! */
8878 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8879 {
8880 uint16_t u16EffAddr;
8881
8882 /* Handle the disp16 form with no registers first. */
8883 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8884 {
8885 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8886 uInfo = u16EffAddr;
8887 }
8888 else
8889 {
8890 /* Get the displacement. */
8891 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8892 {
8893 case 0: u16EffAddr = 0; break;
8894 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8895 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8896 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8897 }
8898 uInfo = u16EffAddr;
8899
8900 /* Add the base and index registers to the disp. */
8901 switch (bRm & X86_MODRM_RM_MASK)
8902 {
8903 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8904 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8905 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8906 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8907 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8908 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8909 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8910 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8911 }
8912 }
8913
8914 *pGCPtrEff = u16EffAddr;
8915 }
8916 else
8917 {
8918 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8919 uint32_t u32EffAddr;
8920
8921 /* Handle the disp32 form with no registers first. */
8922 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8923 {
8924 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8925 uInfo = u32EffAddr;
8926 }
8927 else
8928 {
8929 /* Get the register (or SIB) value. */
8930 uInfo = 0;
8931 switch ((bRm & X86_MODRM_RM_MASK))
8932 {
8933 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8934 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8935 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8936 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8937 case 4: /* SIB */
8938 {
8939 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8940 uInfo = (uint64_t)bSib << 32;
8941
8942 /* Get the index and scale it. */
8943 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8944 {
8945 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8946 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8947 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8948 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8949 case 4: u32EffAddr = 0; /*none */ break;
8950 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8951 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8952 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8954 }
8955 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8956
8957 /* add base */
8958 switch (bSib & X86_SIB_BASE_MASK)
8959 {
8960 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8961 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8962 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8963 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8964 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8965 case 5:
8966 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8967 {
8968 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8969 SET_SS_DEF();
8970 }
8971 else
8972 {
8973 uint32_t u32Disp;
8974 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8975 u32EffAddr += u32Disp;
8976 uInfo |= u32Disp;
8977 }
8978 break;
8979 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8980 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8981 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8982 }
8983 break;
8984 }
8985 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8986 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8987 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8988 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8989 }
8990
8991 /* Get and add the displacement. */
8992 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8993 {
8994 case 0:
8995 break;
8996 case 1:
8997 {
8998 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8999 u32EffAddr += i8Disp;
9000 uInfo |= (uint32_t)(int32_t)i8Disp;
9001 break;
9002 }
9003 case 2:
9004 {
9005 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9006 u32EffAddr += u32Disp;
9007 uInfo |= (uint32_t)u32Disp;
9008 break;
9009 }
9010 default:
9011 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9012 }
9013
9014 }
9015 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9016 *pGCPtrEff = u32EffAddr;
9017 }
9018 }
9019 else
9020 {
9021 uint64_t u64EffAddr;
9022
9023 /* Handle the rip+disp32 form with no registers first. */
9024 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9025 {
9026 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9027 uInfo = (uint32_t)u64EffAddr;
9028 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9029 }
9030 else
9031 {
9032 /* Get the register (or SIB) value. */
9033 uInfo = 0;
9034 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9035 {
9036 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9037 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9038 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9039 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9040 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9041 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9042 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9043 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9044 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9045 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9046 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9047 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9048 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9049 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9050 /* SIB */
9051 case 4:
9052 case 12:
9053 {
9054 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9055 uInfo = (uint64_t)bSib << 32;
9056
9057 /* Get the index and scale it. */
9058 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9059 {
9060 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9061 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9062 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9063 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9064 case 4: u64EffAddr = 0; /*none */ break;
9065 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9066 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9067 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9068 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9069 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9070 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9071 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9072 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9073 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9074 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9075 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9076 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9077 }
9078 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9079
9080 /* add base */
9081 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9082 {
9083 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9084 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9085 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9086 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9087 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9088 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9089 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9090 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9091 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9092 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9093 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9094 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9095 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9096 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9097 /* complicated encodings */
9098 case 5:
9099 case 13:
9100 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9101 {
9102 if (!pVCpu->iem.s.uRexB)
9103 {
9104 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9105 SET_SS_DEF();
9106 }
9107 else
9108 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9109 }
9110 else
9111 {
9112 uint32_t u32Disp;
9113 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9114 u64EffAddr += (int32_t)u32Disp;
9115 uInfo |= u32Disp;
9116 }
9117 break;
9118 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9119 }
9120 break;
9121 }
9122 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9123 }
9124
9125 /* Get and add the displacement. */
9126 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9127 {
9128 case 0:
9129 break;
9130 case 1:
9131 {
9132 int8_t i8Disp;
9133 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9134 u64EffAddr += i8Disp;
9135 uInfo |= (uint32_t)(int32_t)i8Disp;
9136 break;
9137 }
9138 case 2:
9139 {
9140 uint32_t u32Disp;
9141 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9142 u64EffAddr += (int32_t)u32Disp;
9143 uInfo |= u32Disp;
9144 break;
9145 }
9146 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9147 }
9148
9149 }
9150
9151 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9152 *pGCPtrEff = u64EffAddr;
9153 else
9154 {
9155 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9156 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9157 }
9158 }
9159 *puInfo = uInfo;
9160
9161 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9162 return VINF_SUCCESS;
9163}
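
/* Usage sketch (illustrative only; the locals below are hypothetical, while the
 * packing of cbImmAndRspOffset and *puInfo follows the documentation above):
 *
 *     RTGCPTR      GCPtrEff;
 *     uint64_t     uInfo;
 *     uint32_t const cbImmAndRspOffset = 4 | (0 << 8);   // 4 immediate bytes follow, no RSP bias
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *     uint32_t const u32Disp = (uint32_t)uInfo;          // bits 31:0  - displacement
 *     uint8_t  const bSib    = (uint8_t)(uInfo >> 32);   // bits 39:32 - SIB byte
 */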
9164
9165/** @} */
9166
9167
9168#ifdef LOG_ENABLED
9169/**
9170 * Logs the current instruction.
9171 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9172 * @param fSameCtx Set if we have the same context information as the VMM,
9173 * clear if we may have already executed an instruction in
9174 * our debug context. When clear, we assume IEMCPU holds
9175 * valid CPU mode info.
9176 *
9177 * The @a fSameCtx parameter is now misleading and obsolete.
9178 * @param pszFunction The IEM function doing the execution.
9179 */
9180static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9181{
9182# ifdef IN_RING3
9183 if (LogIs2Enabled())
9184 {
9185 char szInstr[256];
9186 uint32_t cbInstr = 0;
9187 if (fSameCtx)
9188 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9189 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9190 szInstr, sizeof(szInstr), &cbInstr);
9191 else
9192 {
9193 uint32_t fFlags = 0;
9194 switch (IEM_GET_CPU_MODE(pVCpu))
9195 {
9196 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9197 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9198 case IEMMODE_16BIT:
9199 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9200 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9201 else
9202 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9203 break;
9204 }
9205 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9206 szInstr, sizeof(szInstr), &cbInstr);
9207 }
9208
9209 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9210 Log2(("**** %s fExec=%x\n"
9211 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9212 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9213 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9214 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9215 " %s\n"
9216 , pszFunction, pVCpu->iem.s.fExec,
9217 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9218 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9219 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9220 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9221 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9222 szInstr));
9223
9224 /* This stuff sucks atm. as it fills the log with MSRs. */
9225 //if (LogIs3Enabled())
9226 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9227 }
9228 else
9229# endif
9230 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9231 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9232 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9233}
9234#endif /* LOG_ENABLED */
9235
9236
9237#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9238/**
9239 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9240 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9241 *
9242 * @returns Modified rcStrict.
9243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9244 * @param rcStrict The instruction execution status.
9245 */
9246static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9247{
9248 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9249 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9250 {
9251 /* VMX preemption timer takes priority over NMI-window exits. */
9252 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9253 {
9254 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9256 }
9257 /*
9258 * Check remaining intercepts.
9259 *
9260 * NMI-window and Interrupt-window VM-exits.
9261 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9262 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9263 *
9264 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9265 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9266 */
9267 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9268 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9269 && !TRPMHasTrap(pVCpu))
9270 {
9271 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9272 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9273 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9274 {
9275 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9276 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9277 }
9278 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9279 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9280 {
9281 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9282 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9283 }
9284 }
9285 }
9286 /* TPR-below threshold/APIC write has the highest priority. */
9287 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9288 {
9289 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9290 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9291 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9292 }
9293 /* MTF takes priority over VMX-preemption timer. */
9294 else
9295 {
9296 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9297 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9298 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9299 }
9300 return rcStrict;
9301}
9302#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9303
9304
9305/**
9306 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9307 * IEMExecOneWithPrefetchedByPC.
9308 *
9309 * Similar code is found in IEMExecLots.
9310 *
9311 * @return Strict VBox status code.
9312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9313 * @param fExecuteInhibit If set, execute the instruction following CLI,
9314 * POP SS and MOV SS,GR.
9315 * @param pszFunction The calling function name.
9316 */
9317DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9318{
9319 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9320 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9321 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9322 RT_NOREF_PV(pszFunction);
9323
9324#ifdef IEM_WITH_SETJMP
9325 VBOXSTRICTRC rcStrict;
9326 IEM_TRY_SETJMP(pVCpu, rcStrict)
9327 {
9328 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9329 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9330 }
9331 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9332 {
9333 pVCpu->iem.s.cLongJumps++;
9334 }
9335 IEM_CATCH_LONGJMP_END(pVCpu);
9336#else
9337 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9338 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9339#endif
9340 if (rcStrict == VINF_SUCCESS)
9341 pVCpu->iem.s.cInstructions++;
9342 if (pVCpu->iem.s.cActiveMappings > 0)
9343 {
9344 Assert(rcStrict != VINF_SUCCESS);
9345 iemMemRollback(pVCpu);
9346 }
9347 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9348 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9349 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9350
9351//#ifdef DEBUG
9352// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9353//#endif
9354
9355#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9356 /*
9357 * Perform any VMX nested-guest instruction boundary actions.
9358 *
9359 * If any of these causes a VM-exit, we must skip executing the next
9360 * instruction (would run into stale page tables). A VM-exit makes sure
9361 * there is no interrupt-inhibition, so that should ensure we don't go on
9362 * to try executing the next instruction.  Clearing fExecuteInhibit is
9363 * problematic because of the setjmp/longjmp clobbering above.
9364 */
9365 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9366 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9367 || rcStrict != VINF_SUCCESS)
9368 { /* likely */ }
9369 else
9370 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9371#endif
9372
9373 /* Execute the next instruction as well if a cli, pop ss or
9374 mov ss, Gr has just completed successfully. */
9375 if ( fExecuteInhibit
9376 && rcStrict == VINF_SUCCESS
9377 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9378 {
9379 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9380 if (rcStrict == VINF_SUCCESS)
9381 {
9382#ifdef LOG_ENABLED
9383 iemLogCurInstr(pVCpu, false, pszFunction);
9384#endif
9385#ifdef IEM_WITH_SETJMP
9386 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9387 {
9388 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9389 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9390 }
9391 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9392 {
9393 pVCpu->iem.s.cLongJumps++;
9394 }
9395 IEM_CATCH_LONGJMP_END(pVCpu);
9396#else
9397 IEM_OPCODE_GET_FIRST_U8(&b);
9398 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9399#endif
9400 if (rcStrict == VINF_SUCCESS)
9401 {
9402 pVCpu->iem.s.cInstructions++;
9403#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9404 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9405 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9406 { /* likely */ }
9407 else
9408 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9409#endif
9410 }
9411 if (pVCpu->iem.s.cActiveMappings > 0)
9412 {
9413 Assert(rcStrict != VINF_SUCCESS);
9414 iemMemRollback(pVCpu);
9415 }
9416 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9417 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9418 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9419 }
9420 else if (pVCpu->iem.s.cActiveMappings > 0)
9421 iemMemRollback(pVCpu);
9422 /** @todo drop this after we bake this change into RIP advancing. */
9423 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9424 }
9425
9426 /*
9427 * Return value fiddling, statistics and sanity assertions.
9428 */
9429 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9430
9431 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9432 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9433 return rcStrict;
9434}
9435
9436
9437/**
9438 * Execute one instruction.
9439 *
9440 * @return Strict VBox status code.
9441 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9442 */
9443VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9444{
9445 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9446#ifdef LOG_ENABLED
9447 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9448#endif
9449
9450 /*
9451 * Do the decoding and emulation.
9452 */
9453 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9454 if (rcStrict == VINF_SUCCESS)
9455 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9456 else if (pVCpu->iem.s.cActiveMappings > 0)
9457 iemMemRollback(pVCpu);
9458
9459 if (rcStrict != VINF_SUCCESS)
9460 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9461 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9462 return rcStrict;
9463}
9464
9465
9466VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9467{
9468 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9469 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9470 if (rcStrict == VINF_SUCCESS)
9471 {
9472 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9473 if (pcbWritten)
9474 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9475 }
9476 else if (pVCpu->iem.s.cActiveMappings > 0)
9477 iemMemRollback(pVCpu);
9478
9479 return rcStrict;
9480}
9481
9482
9483VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9484 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9485{
9486 VBOXSTRICTRC rcStrict;
9487 if ( cbOpcodeBytes
9488 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9489 {
9490 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9491#ifdef IEM_WITH_CODE_TLB
9492 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9493 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9494 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9495 pVCpu->iem.s.offCurInstrStart = 0;
9496 pVCpu->iem.s.offInstrNextByte = 0;
9497 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9498#else
9499 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9500 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9501#endif
9502 rcStrict = VINF_SUCCESS;
9503 }
9504 else
9505 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9506 if (rcStrict == VINF_SUCCESS)
9507 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9508 else if (pVCpu->iem.s.cActiveMappings > 0)
9509 iemMemRollback(pVCpu);
9510
9511 return rcStrict;
9512}
9513
9514
9515VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9516{
9517 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9518 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9519 if (rcStrict == VINF_SUCCESS)
9520 {
9521 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9522 if (pcbWritten)
9523 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9524 }
9525 else if (pVCpu->iem.s.cActiveMappings > 0)
9526 iemMemRollback(pVCpu);
9527
9528 return rcStrict;
9529}
9530
9531
9532VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9533 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9534{
9535 VBOXSTRICTRC rcStrict;
9536 if ( cbOpcodeBytes
9537 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9538 {
9539 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9540#ifdef IEM_WITH_CODE_TLB
9541 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9542 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9543 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9544 pVCpu->iem.s.offCurInstrStart = 0;
9545 pVCpu->iem.s.offInstrNextByte = 0;
9546 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9547#else
9548 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9549 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9550#endif
9551 rcStrict = VINF_SUCCESS;
9552 }
9553 else
9554 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9555 if (rcStrict == VINF_SUCCESS)
9556 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9557 else if (pVCpu->iem.s.cActiveMappings > 0)
9558 iemMemRollback(pVCpu);
9559
9560 return rcStrict;
9561}
9562
9563
9564/**
9565 * For handling split cacheline lock operations when the host has split-lock
9566 * detection enabled.
9567 *
9568 * This will cause the interpreter to disregard the lock prefix and implicit
9569 * locking (xchg).
9570 *
9571 * @returns Strict VBox status code.
9572 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9573 */
9574VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9575{
9576 /*
9577 * Do the decoding and emulation.
9578 */
9579 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9580 if (rcStrict == VINF_SUCCESS)
9581 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9582 else if (pVCpu->iem.s.cActiveMappings > 0)
9583 iemMemRollback(pVCpu);
9584
9585 if (rcStrict != VINF_SUCCESS)
9586 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9587 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9588 return rcStrict;
9589}
9590
9591
9592/**
9593 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9594 * inject a pending TRPM trap.
9595 */
9596VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9597{
9598 Assert(TRPMHasTrap(pVCpu));
9599
9600 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9601 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9602 {
9603 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9604#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9605 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9606 if (fIntrEnabled)
9607 {
9608 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9609 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9610 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9611 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9612 else
9613 {
9614 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9615 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9616 }
9617 }
9618#else
9619 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9620#endif
9621 if (fIntrEnabled)
9622 {
9623 uint8_t u8TrapNo;
9624 TRPMEVENT enmType;
9625 uint32_t uErrCode;
9626 RTGCPTR uCr2;
9627 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9628 AssertRC(rc2);
9629 Assert(enmType == TRPM_HARDWARE_INT);
9630 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9631
9632 TRPMResetTrap(pVCpu);
9633
9634#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9635 /* Injecting an event may cause a VM-exit. */
9636 if ( rcStrict != VINF_SUCCESS
9637 && rcStrict != VINF_IEM_RAISED_XCPT)
9638 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9639#else
9640 NOREF(rcStrict);
9641#endif
9642 }
9643 }
9644
9645 return VINF_SUCCESS;
9646}
9647
9648
9649VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9650{
9651 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9652 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9653 Assert(cMaxInstructions > 0);
9654
9655 /*
9656 * See if there is an interrupt pending in TRPM, inject it if we can.
9657 */
9658 /** @todo What if we are injecting an exception and not an interrupt? Is that
9659 * possible here? For now we assert it is indeed only an interrupt. */
9660 if (!TRPMHasTrap(pVCpu))
9661 { /* likely */ }
9662 else
9663 {
9664 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9665 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9666 { /*likely */ }
9667 else
9668 return rcStrict;
9669 }
9670
9671 /*
9672 * Initial decoder init w/ prefetch, then setup setjmp.
9673 */
9674 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9675 if (rcStrict == VINF_SUCCESS)
9676 {
9677#ifdef IEM_WITH_SETJMP
9678 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9679 IEM_TRY_SETJMP(pVCpu, rcStrict)
9680#endif
9681 {
9682 /*
9683 * The run loop.  We limit ourselves to the caller-specified cMaxInstructions.
9684 */
9685 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9686 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9687 for (;;)
9688 {
9689 /*
9690 * Log the state.
9691 */
9692#ifdef LOG_ENABLED
9693 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9694#endif
9695
9696 /*
9697 * Do the decoding and emulation.
9698 */
9699 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9700 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9701#ifdef VBOX_STRICT
9702 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9703#endif
9704 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9705 {
9706 Assert(pVCpu->iem.s.cActiveMappings == 0);
9707 pVCpu->iem.s.cInstructions++;
9708
9709#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9710 /* Perform any VMX nested-guest instruction boundary actions. */
9711 uint64_t fCpu = pVCpu->fLocalForcedActions;
9712 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9713 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9714 { /* likely */ }
9715 else
9716 {
9717 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9718 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9719 fCpu = pVCpu->fLocalForcedActions;
9720 else
9721 {
9722 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9723 break;
9724 }
9725 }
9726#endif
9727 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9728 {
9729#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9730 uint64_t fCpu = pVCpu->fLocalForcedActions;
9731#endif
9732 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9733 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9734 | VMCPU_FF_TLB_FLUSH
9735 | VMCPU_FF_UNHALT );
9736
9737 if (RT_LIKELY( ( !fCpu
9738 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9739 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9740 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9741 {
9742 if (--cMaxInstructionsGccStupidity > 0)
9743 {
9744 /* Poll timers every now and then according to the caller's specs. */
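     /* cPollRate + 1 is asserted to be a power of two on entry, so this AND
        lets TMTimerPollBool run only once per cPollRate + 1 instructions. */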
9745 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9746 || !TMTimerPollBool(pVM, pVCpu))
9747 {
9748 Assert(pVCpu->iem.s.cActiveMappings == 0);
9749 iemReInitDecoder(pVCpu);
9750 continue;
9751 }
9752 }
9753 }
9754 }
9755 Assert(pVCpu->iem.s.cActiveMappings == 0);
9756 }
9757 else if (pVCpu->iem.s.cActiveMappings > 0)
9758 iemMemRollback(pVCpu);
9759 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9760 break;
9761 }
9762 }
9763#ifdef IEM_WITH_SETJMP
9764 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9765 {
9766 if (pVCpu->iem.s.cActiveMappings > 0)
9767 iemMemRollback(pVCpu);
9768# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9769 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9770# endif
9771 pVCpu->iem.s.cLongJumps++;
9772 }
9773 IEM_CATCH_LONGJMP_END(pVCpu);
9774#endif
9775
9776 /*
9777 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9778 */
9779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9780 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9781 }
9782 else
9783 {
9784 if (pVCpu->iem.s.cActiveMappings > 0)
9785 iemMemRollback(pVCpu);
9786
9787#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9788 /*
9789 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9790 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9791 */
9792 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9793#endif
9794 }
9795
9796 /*
9797 * Maybe re-enter raw-mode and log.
9798 */
9799 if (rcStrict != VINF_SUCCESS)
9800 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9801 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9802 if (pcInstructions)
9803 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9804 return rcStrict;
9805}
9806
9807
9808/**
9809 * Interface used by EMExecuteExec, does exit statistics and limits.
9810 *
9811 * @returns Strict VBox status code.
9812 * @param pVCpu The cross context virtual CPU structure.
9813 * @param fWillExit To be defined.
9814 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9815 * @param cMaxInstructions Maximum number of instructions to execute.
9816 * @param cMaxInstructionsWithoutExits
9817 * The max number of instructions without exits.
9818 * @param pStats Where to return statistics.
9819 */
9820VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9821 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9822{
9823 NOREF(fWillExit); /** @todo define flexible exit crits */
9824
9825 /*
9826 * Initialize return stats.
9827 */
9828 pStats->cInstructions = 0;
9829 pStats->cExits = 0;
9830 pStats->cMaxExitDistance = 0;
9831 pStats->cReserved = 0;
9832
9833 /*
9834 * Initial decoder init w/ prefetch, then setup setjmp.
9835 */
9836 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9837 if (rcStrict == VINF_SUCCESS)
9838 {
9839#ifdef IEM_WITH_SETJMP
9840 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9841 IEM_TRY_SETJMP(pVCpu, rcStrict)
9842#endif
9843 {
9844#ifdef IN_RING0
9845 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9846#endif
9847 uint32_t cInstructionSinceLastExit = 0;
9848
9849 /*
9850 * The run loop.  We limit ourselves to the caller-specified cMaxInstructions.
9851 */
9852 PVM pVM = pVCpu->CTX_SUFF(pVM);
9853 for (;;)
9854 {
9855 /*
9856 * Log the state.
9857 */
9858#ifdef LOG_ENABLED
9859 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9860#endif
9861
9862 /*
9863 * Do the decoding and emulation.
9864 */
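     /* Snapshot the potential-exit counter so we can tell below whether this
        instruction triggered an exit and update the exit statistics/distance. */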
9865 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9866
9867 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9868 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9869
9870 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9871 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9872 {
9873 pStats->cExits += 1;
9874 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9875 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9876 cInstructionSinceLastExit = 0;
9877 }
9878
9879 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9880 {
9881 Assert(pVCpu->iem.s.cActiveMappings == 0);
9882 pVCpu->iem.s.cInstructions++;
9883 pStats->cInstructions++;
9884 cInstructionSinceLastExit++;
9885
9886#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9887 /* Perform any VMX nested-guest instruction boundary actions. */
9888 uint64_t fCpu = pVCpu->fLocalForcedActions;
9889 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9890 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9891 { /* likely */ }
9892 else
9893 {
9894 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9895 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9896 fCpu = pVCpu->fLocalForcedActions;
9897 else
9898 {
9899 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9900 break;
9901 }
9902 }
9903#endif
9904 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9905 {
9906#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9907 uint64_t fCpu = pVCpu->fLocalForcedActions;
9908#endif
9909 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9910 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9911 | VMCPU_FF_TLB_FLUSH
9912 | VMCPU_FF_UNHALT );
9913 if (RT_LIKELY( ( ( !fCpu
9914 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9915 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9916 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9917 || pStats->cInstructions < cMinInstructions))
9918 {
9919 if (pStats->cInstructions < cMaxInstructions)
9920 {
9921 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9922 {
9923#ifdef IN_RING0
9924 if ( !fCheckPreemptionPending
9925 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9926#endif
9927 {
9928 Assert(pVCpu->iem.s.cActiveMappings == 0);
9929 iemReInitDecoder(pVCpu);
9930 continue;
9931 }
9932#ifdef IN_RING0
9933 rcStrict = VINF_EM_RAW_INTERRUPT;
9934 break;
9935#endif
9936 }
9937 }
9938 }
9939 Assert(!(fCpu & VMCPU_FF_IEM));
9940 }
9941 Assert(pVCpu->iem.s.cActiveMappings == 0);
9942 }
9943 else if (pVCpu->iem.s.cActiveMappings > 0)
9944 iemMemRollback(pVCpu);
9945 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9946 break;
9947 }
9948 }
9949#ifdef IEM_WITH_SETJMP
9950 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9951 {
9952 if (pVCpu->iem.s.cActiveMappings > 0)
9953 iemMemRollback(pVCpu);
9954 pVCpu->iem.s.cLongJumps++;
9955 }
9956 IEM_CATCH_LONGJMP_END(pVCpu);
9957#endif
9958
9959 /*
9960 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9961 */
9962 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9963 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9964 }
9965 else
9966 {
9967 if (pVCpu->iem.s.cActiveMappings > 0)
9968 iemMemRollback(pVCpu);
9969
9970#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9971 /*
9972 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9973 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9974 */
9975 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9976#endif
9977 }
9978
9979 /*
9980 * Maybe re-enter raw-mode and log.
9981 */
9982 if (rcStrict != VINF_SUCCESS)
9983 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9984 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9985 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9986 return rcStrict;
9987}
9988
9989
9990/**
9991 * Injects a trap, fault, abort, software interrupt or external interrupt.
9992 *
9993 * The parameter list matches TRPMQueryTrapAll pretty closely.
9994 *
9995 * @returns Strict VBox status code.
9996 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9997 * @param u8TrapNo The trap number.
9998 * @param enmType What type is it (trap/fault/abort), software
9999 * interrupt or hardware interrupt.
10000 * @param uErrCode The error code if applicable.
10001 * @param uCr2 The CR2 value if applicable.
10002 * @param cbInstr The instruction length (only relevant for
10003 * software interrupts).
10004 */
10005VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10006 uint8_t cbInstr)
10007{
10008 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10009#ifdef DBGFTRACE_ENABLED
10010 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10011 u8TrapNo, enmType, uErrCode, uCr2);
10012#endif
10013
10014 uint32_t fFlags;
10015 switch (enmType)
10016 {
10017 case TRPM_HARDWARE_INT:
10018 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10019 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10020 uErrCode = uCr2 = 0;
10021 break;
10022
10023 case TRPM_SOFTWARE_INT:
10024 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10025 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10026 uErrCode = uCr2 = 0;
10027 break;
10028
10029 case TRPM_TRAP:
10030 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10031 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10032 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10033 if (u8TrapNo == X86_XCPT_PF)
10034 fFlags |= IEM_XCPT_FLAGS_CR2;
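     /* These exceptions push an error code onto the stack (always zero for #DF). */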
10035 switch (u8TrapNo)
10036 {
10037 case X86_XCPT_DF:
10038 case X86_XCPT_TS:
10039 case X86_XCPT_NP:
10040 case X86_XCPT_SS:
10041 case X86_XCPT_PF:
10042 case X86_XCPT_AC:
10043 case X86_XCPT_GP:
10044 fFlags |= IEM_XCPT_FLAGS_ERR;
10045 break;
10046 }
10047 break;
10048
10049 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10050 }
10051
10052 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10053
10054 if (pVCpu->iem.s.cActiveMappings > 0)
10055 iemMemRollback(pVCpu);
10056
10057 return rcStrict;
10058}
10059
10060
10061/**
10062 * Injects the active TRPM event.
10063 *
10064 * @returns Strict VBox status code.
10065 * @param pVCpu The cross context virtual CPU structure.
10066 */
10067VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10068{
10069#ifndef IEM_IMPLEMENTS_TASKSWITCH
10070 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10071#else
10072 uint8_t u8TrapNo;
10073 TRPMEVENT enmType;
10074 uint32_t uErrCode;
10075 RTGCUINTPTR uCr2;
10076 uint8_t cbInstr;
10077 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10078 if (RT_FAILURE(rc))
10079 return rc;
10080
10081 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10082 * ICEBP \#DB injection as a special case. */
10083 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10084#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10085 if (rcStrict == VINF_SVM_VMEXIT)
10086 rcStrict = VINF_SUCCESS;
10087#endif
10088#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10089 if (rcStrict == VINF_VMX_VMEXIT)
10090 rcStrict = VINF_SUCCESS;
10091#endif
10092 /** @todo Are there any other codes that imply the event was successfully
10093 * delivered to the guest? See @bugref{6607}. */
10094 if ( rcStrict == VINF_SUCCESS
10095 || rcStrict == VINF_IEM_RAISED_XCPT)
10096 TRPMResetTrap(pVCpu);
10097
10098 return rcStrict;
10099#endif
10100}
10101
10102
10103VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10104{
10105 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10106 return VERR_NOT_IMPLEMENTED;
10107}
10108
10109
10110VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10111{
10112 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10113 return VERR_NOT_IMPLEMENTED;
10114}
10115
10116
10117/**
10118 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10119 *
10120 * This API ASSUMES that the caller has already verified that the guest code is
10121 * allowed to access the I/O port. (The I/O port is in the DX register in the
10122 * guest state.)
10123 *
10124 * @returns Strict VBox status code.
10125 * @param pVCpu The cross context virtual CPU structure.
10126 * @param cbValue The size of the I/O port access (1, 2, or 4).
10127 * @param enmAddrMode The addressing mode.
10128 * @param fRepPrefix Indicates whether a repeat prefix is used
10129 * (doesn't matter which for this instruction).
10130 * @param cbInstr The instruction length in bytes.
10131 * @param iEffSeg The effective segment address.
10132 * @param fIoChecked Whether the access to the I/O port has been
10133 * checked or not. It's typically checked in the
10134 * HM scenario.
10135 */
10136VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10137 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10138{
10139 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10140 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10141
10142 /*
10143 * State init.
10144 */
10145 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10146
10147 /*
10148 * Switch orgy for getting to the right handler.
10149 */
10150 VBOXSTRICTRC rcStrict;
10151 if (fRepPrefix)
10152 {
10153 switch (enmAddrMode)
10154 {
10155 case IEMMODE_16BIT:
10156 switch (cbValue)
10157 {
10158 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10159 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10160 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10161 default:
10162 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10163 }
10164 break;
10165
10166 case IEMMODE_32BIT:
10167 switch (cbValue)
10168 {
10169 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10170 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10171 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10172 default:
10173 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10174 }
10175 break;
10176
10177 case IEMMODE_64BIT:
10178 switch (cbValue)
10179 {
10180 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10181 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10182 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10183 default:
10184 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10185 }
10186 break;
10187
10188 default:
10189 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10190 }
10191 }
10192 else
10193 {
10194 switch (enmAddrMode)
10195 {
10196 case IEMMODE_16BIT:
10197 switch (cbValue)
10198 {
10199 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10200 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10201 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10202 default:
10203 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10204 }
10205 break;
10206
10207 case IEMMODE_32BIT:
10208 switch (cbValue)
10209 {
10210 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10211 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10212 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10213 default:
10214 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10215 }
10216 break;
10217
10218 case IEMMODE_64BIT:
10219 switch (cbValue)
10220 {
10221 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10222 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10223 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10224 default:
10225 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10226 }
10227 break;
10228
10229 default:
10230 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10231 }
10232 }
10233
10234 if (pVCpu->iem.s.cActiveMappings)
10235 iemMemRollback(pVCpu);
10236
10237 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10238}
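
/* Call-site sketch (illustrative only; cbInstr and the parameter choices are
 * hypothetical, e.g. what an HM exit handler might pass for a REP OUTSB with
 * 32-bit addressing, DS segment and the I/O port access already verified):
 *
 *     // cbValue=1, REP prefix, 32-bit addressing, DS segment, I/O access already checked
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true,
 *                                                  cbInstr, X86_SREG_DS, true);
 */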
10239
10240
10241/**
10242 * Interface for HM and EM for executing string I/O IN (read) instructions.
10243 *
10244 * This API ASSUMES that the caller has already verified that the guest code is
10245 * allowed to access the I/O port. (The I/O port is in the DX register in the
10246 * guest state.)
10247 *
10248 * @returns Strict VBox status code.
10249 * @param pVCpu The cross context virtual CPU structure.
10250 * @param cbValue The size of the I/O port access (1, 2, or 4).
10251 * @param enmAddrMode The addressing mode.
10252 * @param fRepPrefix Indicates whether a repeat prefix is used
10253 * (doesn't matter which for this instruction).
10254 * @param cbInstr The instruction length in bytes.
10255 * @param fIoChecked Whether the access to the I/O port has been
10256 * checked or not. It's typically checked in the
10257 * HM scenario.
10258 */
10259VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10260 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10261{
10262 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10263
10264 /*
10265 * State init.
10266 */
10267 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10268
10269 /*
10270 * Switch orgy for getting to the right handler.
10271 */
10272 VBOXSTRICTRC rcStrict;
10273 if (fRepPrefix)
10274 {
10275 switch (enmAddrMode)
10276 {
10277 case IEMMODE_16BIT:
10278 switch (cbValue)
10279 {
10280 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10281 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10282 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10283 default:
10284 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10285 }
10286 break;
10287
10288 case IEMMODE_32BIT:
10289 switch (cbValue)
10290 {
10291 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10292 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10293 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10294 default:
10295 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10296 }
10297 break;
10298
10299 case IEMMODE_64BIT:
10300 switch (cbValue)
10301 {
10302 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10303 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10304 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10305 default:
10306 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10307 }
10308 break;
10309
10310 default:
10311 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10312 }
10313 }
10314 else
10315 {
10316 switch (enmAddrMode)
10317 {
10318 case IEMMODE_16BIT:
10319 switch (cbValue)
10320 {
10321 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10322 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10323 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10324 default:
10325 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10326 }
10327 break;
10328
10329 case IEMMODE_32BIT:
10330 switch (cbValue)
10331 {
10332 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10333 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10334 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10335 default:
10336 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10337 }
10338 break;
10339
10340 case IEMMODE_64BIT:
10341 switch (cbValue)
10342 {
10343 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10344 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10345 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10346 default:
10347 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10348 }
10349 break;
10350
10351 default:
10352 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10353 }
10354 }
10355
10356 if ( pVCpu->iem.s.cActiveMappings == 0
10357 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10358 { /* likely */ }
10359 else
10360 {
10361 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10362 iemMemRollback(pVCpu);
10363 }
10364 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10365}
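
/*
 * Illustrative sketch (editor's addition, not part of the upstream sources):
 * how a caller such as an HM exit handler could forward a decoded REP INSB
 * with a 32-bit address size to IEMExecStringIoRead.  The wrapper name is
 * hypothetical; the parameters follow the documentation above.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardRepInsByte(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* Byte-sized, REP-prefixed INS using a 32-bit address size.  fIoChecked is
       true because this sketch assumes the caller has already performed the
       I/O permission checks (the typical HM scenario). */
    return IEMExecStringIoRead(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                               true /*fRepPrefix*/, cbInstr, true /*fIoChecked*/);
}
#endif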
10366
10367
10368/**
10369 * Interface for rawmode to execute an OUT instruction.
10370 *
10371 * @returns Strict VBox status code.
10372 * @param pVCpu The cross context virtual CPU structure.
10373 * @param cbInstr The instruction length in bytes.
10374 * @param u16Port The port to write to.
10375 * @param fImm Whether the port is specified using an immediate operand or
10376 * using the implicit DX register.
10377 * @param cbReg The register size.
10378 *
10379 * @remarks In ring-0 not all of the state needs to be synced in.
10380 */
10381VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10382{
10383 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10384 Assert(cbReg <= 4 && cbReg != 3);
10385
10386 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10387 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10388 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10389 Assert(!pVCpu->iem.s.cActiveMappings);
10390 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10391}
10392
10393
10394/**
10395 * Interface for rawmode to execute an IN instruction.
10396 *
10397 * @returns Strict VBox status code.
10398 * @param pVCpu The cross context virtual CPU structure.
10399 * @param cbInstr The instruction length in bytes.
10400 * @param u16Port The port to read.
10401 * @param fImm Whether the port is specified using an immediate operand or
10402 * using the implicit DX register.
10403 * @param cbReg The register size.
10404 */
10405VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10406{
10407 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10408 Assert(cbReg <= 4 && cbReg != 3);
10409
10410 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10411 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10412 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10413 Assert(!pVCpu->iem.s.cActiveMappings);
10414 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10415}
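
/*
 * Illustrative sketch (editor's addition): dispatching a decoded port I/O
 * instruction to IEMExecDecodedIn / IEMExecDecodedOut.  The wrapper name and
 * the fWrite flag are hypothetical; cbReg must be 1, 2 or 4 as asserted by
 * the interfaces above.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardPortIo(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port,
                                         bool fImm, uint8_t cbReg, bool fWrite)
{
    /* OUT writes the accumulator to the port, IN reads the port into it. */
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg);
    return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, fImm, cbReg);
}
#endif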
10416
10417
10418/**
10419 * Interface for HM and EM to write to a CRx register.
10420 *
10421 * @returns Strict VBox status code.
10422 * @param pVCpu The cross context virtual CPU structure.
10423 * @param cbInstr The instruction length in bytes.
10424 * @param iCrReg The control register number (destination).
10425 * @param iGReg The general purpose register number (source).
10426 *
10427 * @remarks In ring-0 not all of the state needs to be synced in.
10428 */
10429VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10430{
10431 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10432 Assert(iCrReg < 16);
10433 Assert(iGReg < 16);
10434
10435 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10436 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10437 Assert(!pVCpu->iem.s.cActiveMappings);
10438 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10439}
10440
10441
10442/**
10443 * Interface for HM and EM to read from a CRx register.
10444 *
10445 * @returns Strict VBox status code.
10446 * @param pVCpu The cross context virtual CPU structure.
10447 * @param cbInstr The instruction length in bytes.
10448 * @param iGReg The general purpose register number (destination).
10449 * @param iCrReg The control register number (source).
10450 *
10451 * @remarks In ring-0 not all of the state needs to be synced in.
10452 */
10453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10454{
10455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10456 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10457 | CPUMCTX_EXTRN_APIC_TPR);
10458 Assert(iCrReg < 16);
10459 Assert(iGReg < 16);
10460
10461 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10463 Assert(!pVCpu->iem.s.cActiveMappings);
10464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10465}
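
/*
 * Illustrative sketch (editor's addition): forwarding a decoded MOV to/from a
 * control register.  The wrapper name and the fWrite flag are hypothetical;
 * note that the write interface takes (iCrReg, iGReg) while the read
 * interface takes (iGReg, iCrReg).
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardMovCrx(PVMCPUCC pVCpu, uint8_t cbInstr,
                                         uint8_t iCrReg, uint8_t iGReg, bool fWrite)
{
    if (fWrite) /* MOV CRx, GReg */
        return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    /* MOV GReg, CRx */
    return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
}
#endif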
10466
10467
10468/**
10469 * Interface for HM and EM to write to a DRx register.
10470 *
10471 * @returns Strict VBox status code.
10472 * @param pVCpu The cross context virtual CPU structure.
10473 * @param cbInstr The instruction length in bytes.
10474 * @param iDrReg The debug register number (destination).
10475 * @param iGReg The general purpose register number (source).
10476 *
10477 * @remarks In ring-0 not all of the state needs to be synced in.
10478 */
10479VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10480{
10481 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10482 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10483 Assert(iDrReg < 8);
10484 Assert(iGReg < 16);
10485
10486 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10487 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10488 Assert(!pVCpu->iem.s.cActiveMappings);
10489 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10490}
10491
10492
10493/**
10494 * Interface for HM and EM to read from a DRx register.
10495 *
10496 * @returns Strict VBox status code.
10497 * @param pVCpu The cross context virtual CPU structure.
10498 * @param cbInstr The instruction length in bytes.
10499 * @param iGReg The general purpose register number (destination).
10500 * @param iDrReg The debug register number (source).
10501 *
10502 * @remarks In ring-0 not all of the state needs to be synced in.
10503 */
10504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10505{
10506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10508 Assert(iDrReg < 8);
10509 Assert(iGReg < 16);
10510
10511 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10512 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10513 Assert(!pVCpu->iem.s.cActiveMappings);
10514 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10515}
10516
10517
10518/**
10519 * Interface for HM and EM to clear the CR0[TS] bit.
10520 *
10521 * @returns Strict VBox status code.
10522 * @param pVCpu The cross context virtual CPU structure.
10523 * @param cbInstr The instruction length in bytes.
10524 *
10525 * @remarks In ring-0 not all of the state needs to be synced in.
10526 */
10527VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10528{
10529 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10530
10531 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10532 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10533 Assert(!pVCpu->iem.s.cActiveMappings);
10534 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10535}
10536
10537
10538/**
10539 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10540 *
10541 * @returns Strict VBox status code.
10542 * @param pVCpu The cross context virtual CPU structure.
10543 * @param cbInstr The instruction length in bytes.
10544 * @param uValue The value to load into CR0.
10545 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10546 * memory operand. Otherwise pass NIL_RTGCPTR.
10547 *
10548 * @remarks In ring-0 not all of the state needs to be synced in.
10549 */
10550VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10551{
10552 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10553
10554 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10555 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10556 Assert(!pVCpu->iem.s.cActiveMappings);
10557 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10558}
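
/*
 * Illustrative sketch (editor's addition): the GCPtrEffDst argument of
 * IEMExecDecodedLmsw is only meaningful for the memory-operand form of LMSW;
 * the register form passes NIL_RTGCPTR.  The wrapper name and parameters are
 * hypothetical.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue,
                                       bool fMemOperand, RTGCPTR GCPtrEffDst)
{
    /* Only pass the guest-linear operand address for the memory form. */
    return IEMExecDecodedLmsw(pVCpu, cbInstr, uValue, fMemOperand ? GCPtrEffDst : NIL_RTGCPTR);
}
#endif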
10559
10560
10561/**
10562 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10563 *
10564 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10565 *
10566 * @returns Strict VBox status code.
10567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10568 * @param cbInstr The instruction length in bytes.
10569 * @remarks In ring-0 not all of the state needs to be synced in.
10570 * @thread EMT(pVCpu)
10571 */
10572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10573{
10574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10575
10576 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10577 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10578 Assert(!pVCpu->iem.s.cActiveMappings);
10579 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10580}
10581
10582
10583/**
10584 * Interface for HM and EM to emulate the WBINVD instruction.
10585 *
10586 * @returns Strict VBox status code.
10587 * @param pVCpu The cross context virtual CPU structure.
10588 * @param cbInstr The instruction length in bytes.
10589 *
10590 * @remarks In ring-0 not all of the state needs to be synced in.
10591 */
10592VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10593{
10594 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10595
10596 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10598 Assert(!pVCpu->iem.s.cActiveMappings);
10599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10600}
10601
10602
10603/**
10604 * Interface for HM and EM to emulate the INVD instruction.
10605 *
10606 * @returns Strict VBox status code.
10607 * @param pVCpu The cross context virtual CPU structure.
10608 * @param cbInstr The instruction length in bytes.
10609 *
10610 * @remarks In ring-0 not all of the state needs to be synced in.
10611 */
10612VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10613{
10614 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10615
10616 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10618 Assert(!pVCpu->iem.s.cActiveMappings);
10619 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10620}
10621
10622
10623/**
10624 * Interface for HM and EM to emulate the INVLPG instruction.
10625 *
10626 * @returns Strict VBox status code.
10627 * @retval VINF_PGM_SYNC_CR3
10628 *
10629 * @param pVCpu The cross context virtual CPU structure.
10630 * @param cbInstr The instruction length in bytes.
10631 * @param GCPtrPage The effective address of the page to invalidate.
10632 *
10633 * @remarks In ring-0 not all of the state needs to be synced in.
10634 */
10635VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10636{
10637 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10638
10639 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10640 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10641 Assert(!pVCpu->iem.s.cActiveMappings);
10642 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10643}
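
/*
 * Illustrative sketch (editor's addition): emulating INVLPG and reacting to
 * the VINF_PGM_SYNC_CR3 status documented above.  The wrapper name and the
 * log statement are hypothetical.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        Log(("example: INVLPG %RGv requests a full CR3 sync\n", GCPtrPage));
    return rcStrict;
}
#endif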
10644
10645
10646/**
10647 * Interface for HM and EM to emulate the INVPCID instruction.
10648 *
10649 * @returns Strict VBox status code.
10650 * @retval VINF_PGM_SYNC_CR3
10651 *
10652 * @param pVCpu The cross context virtual CPU structure.
10653 * @param cbInstr The instruction length in bytes.
10654 * @param iEffSeg The effective segment register.
10655 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10656 * @param uType The invalidation type.
10657 *
10658 * @remarks In ring-0 not all of the state needs to be synced in.
10659 */
10660VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10661 uint64_t uType)
10662{
10663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10664
10665 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10666 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10667 Assert(!pVCpu->iem.s.cActiveMappings);
10668 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10669}
10670
10671
10672/**
10673 * Interface for HM and EM to emulate the CPUID instruction.
10674 *
10675 * @returns Strict VBox status code.
10676 *
10677 * @param pVCpu The cross context virtual CPU structure.
10678 * @param cbInstr The instruction length in bytes.
10679 *
10680 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
10681 */
10682VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10683{
10684 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10685 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10686
10687 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10688 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10689 Assert(!pVCpu->iem.s.cActiveMappings);
10690 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10691}
10692
10693
10694/**
10695 * Interface for HM and EM to emulate the RDPMC instruction.
10696 *
10697 * @returns Strict VBox status code.
10698 *
10699 * @param pVCpu The cross context virtual CPU structure.
10700 * @param cbInstr The instruction length in bytes.
10701 *
10702 * @remarks Not all of the state needs to be synced in.
10703 */
10704VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10705{
10706 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10707 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10708
10709 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10710 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10711 Assert(!pVCpu->iem.s.cActiveMappings);
10712 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10713}
10714
10715
10716/**
10717 * Interface for HM and EM to emulate the RDTSC instruction.
10718 *
10719 * @returns Strict VBox status code.
10720 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10721 *
10722 * @param pVCpu The cross context virtual CPU structure.
10723 * @param cbInstr The instruction length in bytes.
10724 *
10725 * @remarks Not all of the state needs to be synced in.
10726 */
10727VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10728{
10729 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10730 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10731
10732 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10733 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10734 Assert(!pVCpu->iem.s.cActiveMappings);
10735 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10736}
10737
10738
10739/**
10740 * Interface for HM and EM to emulate the RDTSCP instruction.
10741 *
10742 * @returns Strict VBox status code.
10743 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10744 *
10745 * @param pVCpu The cross context virtual CPU structure.
10746 * @param cbInstr The instruction length in bytes.
10747 *
10748 * @remarks Not all of the state needs to be synced in. Recommended
10749 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10750 */
10751VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10752{
10753 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10755
10756 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10757 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10758 Assert(!pVCpu->iem.s.cActiveMappings);
10759 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10760}
10761
10762
10763/**
10764 * Interface for HM and EM to emulate the RDMSR instruction.
10765 *
10766 * @returns Strict VBox status code.
10767 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10768 *
10769 * @param pVCpu The cross context virtual CPU structure.
10770 * @param cbInstr The instruction length in bytes.
10771 *
10772 * @remarks Not all of the state needs to be synced in. Requires RCX and
10773 * (currently) all MSRs.
10774 */
10775VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10776{
10777 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10778 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10779
10780 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10781 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10782 Assert(!pVCpu->iem.s.cActiveMappings);
10783 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10784}
10785
10786
10787/**
10788 * Interface for HM and EM to emulate the WRMSR instruction.
10789 *
10790 * @returns Strict VBox status code.
10791 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10792 *
10793 * @param pVCpu The cross context virtual CPU structure.
10794 * @param cbInstr The instruction length in bytes.
10795 *
10796 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10797 * and (currently) all MSRs.
10798 */
10799VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10800{
10801 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10802 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10803 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10804
10805 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10806 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10807 Assert(!pVCpu->iem.s.cActiveMappings);
10808 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10809}
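
/*
 * Illustrative sketch (editor's addition): dispatching MSR access exits.  The
 * wrapper name and the fWrite flag are hypothetical; as noted in the remarks
 * above, the MSR index and value are taken from the guest context (RCX, RAX
 * and RDX), so no explicit operands are passed.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardMsrAccess(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
         : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}
#endif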
10810
10811
10812/**
10813 * Interface for HM and EM to emulate the MONITOR instruction.
10814 *
10815 * @returns Strict VBox status code.
10816 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10817 *
10818 * @param pVCpu The cross context virtual CPU structure.
10819 * @param cbInstr The instruction length in bytes.
10820 *
10821 * @remarks Not all of the state needs to be synced in.
10822 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10823 * are used.
10824 */
10825VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10826{
10827 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10828 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10829
10830 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10831 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10832 Assert(!pVCpu->iem.s.cActiveMappings);
10833 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10834}
10835
10836
10837/**
10838 * Interface for HM and EM to emulate the MWAIT instruction.
10839 *
10840 * @returns Strict VBox status code.
10841 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10842 *
10843 * @param pVCpu The cross context virtual CPU structure.
10844 * @param cbInstr The instruction length in bytes.
10845 *
10846 * @remarks Not all of the state needs to be synced in.
10847 */
10848VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10849{
10850 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10851 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10852
10853 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10854 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10855 Assert(!pVCpu->iem.s.cActiveMappings);
10856 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10857}
10858
10859
10860/**
10861 * Interface for HM and EM to emulate the HLT instruction.
10862 *
10863 * @returns Strict VBox status code.
10864 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10865 *
10866 * @param pVCpu The cross context virtual CPU structure.
10867 * @param cbInstr The instruction length in bytes.
10868 *
10869 * @remarks Not all of the state needs to be synced in.
10870 */
10871VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10872{
10873 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10874
10875 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10876 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10877 Assert(!pVCpu->iem.s.cActiveMappings);
10878 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10879}
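
/*
 * Illustrative sketch (editor's addition): the no-operand emulation helpers
 * above (CPUID, RDTSC, HLT, ...) share the same calling pattern.  Treating
 * VINF_IEM_RAISED_XCPT as "exception queued for the guest, resume execution"
 * is one way a caller could interpret the documented status; the wrapper name
 * is hypothetical.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleForwardHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS; /* the exception has already been delivered/queued */
    return rcStrict;
}
#endif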
10880
10881
10882/**
10883 * Checks if IEM is in the process of delivering an event (interrupt or
10884 * exception).
10885 *
10886 * @returns true if we're in the process of raising an interrupt or exception,
10887 * false otherwise.
10888 * @param pVCpu The cross context virtual CPU structure.
10889 * @param puVector Where to store the vector associated with the
10890 * currently delivered event, optional.
10891 * @param pfFlags Where to store the event delivery flags (see
10892 * IEM_XCPT_FLAGS_XXX), optional.
10893 * @param puErr Where to store the error code associated with the
10894 * event, optional.
10895 * @param puCr2 Where to store the CR2 associated with the event,
10896 * optional.
10897 * @remarks The caller should check the flags to determine if the error code and
10898 * CR2 are valid for the event.
10899 */
10900VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10901{
10902 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10903 if (fRaisingXcpt)
10904 {
10905 if (puVector)
10906 *puVector = pVCpu->iem.s.uCurXcpt;
10907 if (pfFlags)
10908 *pfFlags = pVCpu->iem.s.fCurXcpt;
10909 if (puErr)
10910 *puErr = pVCpu->iem.s.uCurXcptErr;
10911 if (puCr2)
10912 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10913 }
10914 return fRaisingXcpt;
10915}
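
/*
 * Illustrative sketch (editor's addition): querying the event IEM is currently
 * delivering, e.g. for logging or for building exit interruption information.
 * The wrapper name is hypothetical; all output parameters are optional, as
 * documented above.
 */
#if 0 /* example only, not compiled */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("example: delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n",
             uVector, fFlags, uErr, uCr2));
}
#endif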
10916
10917#ifdef IN_RING3
10918
10919/**
10920 * Handles the unlikely and probably fatal merge cases.
10921 *
10922 * @returns Merged status code.
10923 * @param rcStrict Current EM status code.
10924 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10925 * with @a rcStrict.
10926 * @param iMemMap The memory mapping index. For error reporting only.
10927 * @param pVCpu The cross context virtual CPU structure of the calling
10928 * thread, for error reporting only.
10929 */
10930DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10931 unsigned iMemMap, PVMCPUCC pVCpu)
10932{
10933 if (RT_FAILURE_NP(rcStrict))
10934 return rcStrict;
10935
10936 if (RT_FAILURE_NP(rcStrictCommit))
10937 return rcStrictCommit;
10938
10939 if (rcStrict == rcStrictCommit)
10940 return rcStrictCommit;
10941
10942 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10943 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10944 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10945 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10946 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10947 return VERR_IOM_FF_STATUS_IPE;
10948}
10949
10950
10951/**
10952 * Helper for IEMR3ProcessForceFlag.
10953 *
10954 * @returns Merged status code.
10955 * @param rcStrict Current EM status code.
10956 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10957 * with @a rcStrict.
10958 * @param iMemMap The memory mapping index. For error reporting only.
10959 * @param pVCpu The cross context virtual CPU structure of the calling
10960 * thread, for error reporting only.
10961 */
10962DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10963{
10964 /* Simple. */
10965 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10966 return rcStrictCommit;
10967
10968 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10969 return rcStrict;
10970
10971 /* EM scheduling status codes. */
10972 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10973 && rcStrict <= VINF_EM_LAST))
10974 {
10975 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10976 && rcStrictCommit <= VINF_EM_LAST))
10977 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10978 }
10979
10980 /* Unlikely */
10981 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10982}
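
/*
 * Illustrative examples (editor's addition) of the merge rules implemented
 * above, derived from the code itself rather than from upstream documentation.
 */
#if 0 /* example only, not compiled */
static void exampleMergeStatusSemantics(PVMCPUCC pVCpu)
{
    /* A clean rcStrict simply adopts the commit status. */
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0, pVCpu) == VINF_EM_RAW_TO_R3);
    /* A pending return-to-ring-3 is dropped once the commit succeeded,
       since we are already executing in ring-3 at this point. */
    Assert(iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS, 0, pVCpu) == VINF_SUCCESS);
    /* When both are EM scheduling codes (VINF_EM_FIRST..VINF_EM_LAST), the
       numerically smaller, i.e. higher priority, status wins. */
}
#endif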
10983
10984
10985/**
10986 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10987 *
10988 * @returns Merge between @a rcStrict and what the commit operation returned.
10989 * @param pVM The cross context VM structure.
10990 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10991 * @param rcStrict The status code returned by ring-0 or raw-mode.
10992 */
10993VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10994{
10995 /*
10996 * Reset the pending commit.
10997 */
10998 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10999 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11000 ("%#x %#x %#x\n",
11001 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11002 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11003
11004 /*
11005 * Commit the pending bounce buffers (usually just one).
11006 */
11007 unsigned cBufs = 0;
11008 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11009 while (iMemMap-- > 0)
11010 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11011 {
11012 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11013 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11014 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11015
11016 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11017 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11018 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11019
11020 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11021 {
11022 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11023 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11024 pbBuf,
11025 cbFirst,
11026 PGMACCESSORIGIN_IEM);
11027 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11028 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11029 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11030 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11031 }
11032
11033 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11034 {
11035 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11036 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11037 pbBuf + cbFirst,
11038 cbSecond,
11039 PGMACCESSORIGIN_IEM);
11040 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11041 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11042 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11043 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11044 }
11045 cBufs++;
11046 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11047 }
11048
11049 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11050 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11051 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11052 pVCpu->iem.s.cActiveMappings = 0;
11053 return rcStrict;
11054}
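
/*
 * Illustrative sketch (editor's addition): how ring-3 code could react to
 * VMCPU_FF_IEM after getting a status back from ring-0 execution.  The
 * wrapper name is hypothetical; the force-flag check mirrors the one this
 * function is documented to handle.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Commit any pending bounce-buffered I/O / MMIO writes and merge statuses. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif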
11055
11056#endif /* IN_RING3 */
11057