
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@104767

Last change on this file since 104767 was 104516, checked in by vboxsync, 9 months ago

VMM/GCM,IEM,HM: Integrate GCM with IEM, extending it to cover the mesa drv situation and valid ring-0 IN instructions to same port. Untested. TODO: NEM. bugref:9735 bugref:10683

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 437.8 KB
1/* $Id: IEMAll.cpp 104516 2024-05-04 01:53:42Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
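/*
 * A quick illustration (made-up message texts and local variables) of how the
 * level assignments above map onto the VBox/log.h macros used throughout this
 * file; with LOG_GROUP defined to LOG_GROUP_IEM below, all of these land in
 * the "IEM" log group:
 *
 *      LogFlow(("IEMExecOne: enter\n"));                         // flow: enter/exit info
 *      Log(("raising #GP(0) at %RGv\n", GCPtrPC));               // level 1: exceptions, major events
 *      Log4(("decode: %04x:%RX64 mov eax, ebx\n", uSel, uRip));  // level 4: mnemonics w/ EIP
 *      Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));     // level 10: TLB activity
 */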
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * @returns IEM_F_BRK_PENDING_XXX or zero.
202 * @param pVCpu The cross context virtual CPU structure of the
203 * calling thread.
204 *
205 * @note Don't call directly, use iemCalcExecDbgFlags instead.
206 */
207uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
208{
209 uint32_t fExec = 0;
210
211 /*
212 * Process guest breakpoints.
213 */
214#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
215 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
216 { \
217 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
218 { \
219 case X86_DR7_RW_EO: \
220 fExec |= IEM_F_PENDING_BRK_INSTR; \
221 break; \
222 case X86_DR7_RW_WO: \
223 case X86_DR7_RW_RW: \
224 fExec |= IEM_F_PENDING_BRK_DATA; \
225 break; \
226 case X86_DR7_RW_IO: \
227 fExec |= IEM_F_PENDING_BRK_X86_IO; \
228 break; \
229 } \
230 } \
231 } while (0)
232
233 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
234 if (fGstDr7 & X86_DR7_ENABLED_MASK)
235 {
236 PROCESS_ONE_BP(fGstDr7, 0);
237 PROCESS_ONE_BP(fGstDr7, 1);
238 PROCESS_ONE_BP(fGstDr7, 2);
239 PROCESS_ONE_BP(fGstDr7, 3);
240 }
241
242 /*
243 * Process hypervisor breakpoints.
244 */
245 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
246 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
247 {
248 PROCESS_ONE_BP(fHyperDr7, 0);
249 PROCESS_ONE_BP(fHyperDr7, 1);
250 PROCESS_ONE_BP(fHyperDr7, 2);
251 PROCESS_ONE_BP(fHyperDr7, 3);
252 }
253
254 return fExec;
255}
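/*
 * Worked example for the DR7 decoding above, using the iprt/x86.h DR7 helper
 * macros (X86_DR7_L / X86_DR7_RW are assumed to be available alongside the
 * X86_DR7_GET_RW and X86_DR7_RW_XX definitions already used here; the DR7
 * value itself is made up):
 *
 *      uint32_t const fDr7 = X86_DR7_L(0)                    // enable breakpoint 0 (local)
 *                          | X86_DR7_RW(0, X86_DR7_RW_EO)    //   ... as execute-only
 *                          | X86_DR7_L(1)                    // enable breakpoint 1 (local)
 *                          | X86_DR7_RW(1, X86_DR7_RW_IO);   //   ... as an I/O breakpoint
 *
 * Feeding such a DR7 through the PROCESS_ONE_BP loop above would set both
 * IEM_F_PENDING_BRK_INSTR and IEM_F_PENDING_BRK_X86_IO in the returned mask.
 */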
256
257
258/**
259 * Initializes the decoder state.
260 *
261 * iemReInitDecoder is mostly a copy of this function.
262 *
263 * @param pVCpu The cross context virtual CPU structure of the
264 * calling thread.
265 * @param fExecOpts Optional execution flags:
266 * - IEM_F_BYPASS_HANDLERS
267 * - IEM_F_X86_DISREGARD_LOCK
268 */
269DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
270{
271 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
272 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
281
282 /* Execution state: */
283 uint32_t fExec;
284 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
285
286 /* Decoder state: */
287 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
288 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
289 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
290 {
291 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
292 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
293 }
294 else
295 {
296 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
297 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
298 }
299 pVCpu->iem.s.fPrefixes = 0;
300 pVCpu->iem.s.uRexReg = 0;
301 pVCpu->iem.s.uRexB = 0;
302 pVCpu->iem.s.uRexIndex = 0;
303 pVCpu->iem.s.idxPrefix = 0;
304 pVCpu->iem.s.uVex3rdReg = 0;
305 pVCpu->iem.s.uVexLength = 0;
306 pVCpu->iem.s.fEvexStuff = 0;
307 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
308#ifdef IEM_WITH_CODE_TLB
309 pVCpu->iem.s.pbInstrBuf = NULL;
310 pVCpu->iem.s.offInstrNextByte = 0;
311 pVCpu->iem.s.offCurInstrStart = 0;
312# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
313 pVCpu->iem.s.offOpcode = 0;
314# endif
315# ifdef VBOX_STRICT
316 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
317 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
318 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
319 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
320# endif
321#else
322 pVCpu->iem.s.offOpcode = 0;
323 pVCpu->iem.s.cbOpcode = 0;
324#endif
325 pVCpu->iem.s.offModRm = 0;
326 pVCpu->iem.s.cActiveMappings = 0;
327 pVCpu->iem.s.iNextMapping = 0;
328 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
329
330#ifdef DBGFTRACE_ENABLED
331 switch (IEM_GET_CPU_MODE(pVCpu))
332 {
333 case IEMMODE_64BIT:
334 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
335 break;
336 case IEMMODE_32BIT:
337 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
338 break;
339 case IEMMODE_16BIT:
340 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
341 break;
342 }
343#endif
344}
345
346
347/**
348 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
349 *
350 * This is mostly a copy of iemInitDecoder.
351 *
352 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
353 */
354DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
355{
356 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
365
366 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
367 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
368 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
369
370 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
371 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
372 pVCpu->iem.s.enmEffAddrMode = enmMode;
373 if (enmMode != IEMMODE_64BIT)
374 {
375 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
376 pVCpu->iem.s.enmEffOpSize = enmMode;
377 }
378 else
379 {
380 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
381 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
382 }
383 pVCpu->iem.s.fPrefixes = 0;
384 pVCpu->iem.s.uRexReg = 0;
385 pVCpu->iem.s.uRexB = 0;
386 pVCpu->iem.s.uRexIndex = 0;
387 pVCpu->iem.s.idxPrefix = 0;
388 pVCpu->iem.s.uVex3rdReg = 0;
389 pVCpu->iem.s.uVexLength = 0;
390 pVCpu->iem.s.fEvexStuff = 0;
391 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
392#ifdef IEM_WITH_CODE_TLB
393 if (pVCpu->iem.s.pbInstrBuf)
394 {
395 uint64_t off = (enmMode == IEMMODE_64BIT
396 ? pVCpu->cpum.GstCtx.rip
397 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
398 - pVCpu->iem.s.uInstrBufPc;
399 if (off < pVCpu->iem.s.cbInstrBufTotal)
400 {
401 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
402 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
403 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
404 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
405 else
406 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
407 }
408 else
409 {
410 pVCpu->iem.s.pbInstrBuf = NULL;
411 pVCpu->iem.s.offInstrNextByte = 0;
412 pVCpu->iem.s.offCurInstrStart = 0;
413 pVCpu->iem.s.cbInstrBuf = 0;
414 pVCpu->iem.s.cbInstrBufTotal = 0;
415 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
416 }
417 }
418 else
419 {
420 pVCpu->iem.s.offInstrNextByte = 0;
421 pVCpu->iem.s.offCurInstrStart = 0;
422 pVCpu->iem.s.cbInstrBuf = 0;
423 pVCpu->iem.s.cbInstrBufTotal = 0;
424# ifdef VBOX_STRICT
425 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
426# endif
427 }
428# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
429 pVCpu->iem.s.offOpcode = 0;
430# endif
431#else /* !IEM_WITH_CODE_TLB */
432 pVCpu->iem.s.cbOpcode = 0;
433 pVCpu->iem.s.offOpcode = 0;
434#endif /* !IEM_WITH_CODE_TLB */
435 pVCpu->iem.s.offModRm = 0;
436 Assert(pVCpu->iem.s.cActiveMappings == 0);
437 pVCpu->iem.s.iNextMapping = 0;
438 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
439 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
440
441#ifdef DBGFTRACE_ENABLED
442 switch (enmMode)
443 {
444 case IEMMODE_64BIT:
445 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
446 break;
447 case IEMMODE_32BIT:
448 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
449 break;
450 case IEMMODE_16BIT:
451 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
452 break;
453 }
454#endif
455}
456
457
458
459/**
460 * Prefetch opcodes the first time when starting executing.
461 *
462 * @returns Strict VBox status code.
463 * @param pVCpu The cross context virtual CPU structure of the
464 * calling thread.
465 * @param fExecOpts Optional execution flags:
466 * - IEM_F_BYPASS_HANDLERS
467 * - IEM_F_X86_DISREGARD_LOCK
468 */
469static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
470{
471 iemInitDecoder(pVCpu, fExecOpts);
472
473#ifndef IEM_WITH_CODE_TLB
474 /*
475 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
476 *
477 * First translate CS:rIP to a physical address.
478 *
479 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
480 * all relevant bytes from the first page, as it ASSUMES it's only ever
481 * called for dealing with CS.LIM, page crossing and instructions that
482 * are too long.
483 */
484 uint32_t cbToTryRead;
485 RTGCPTR GCPtrPC;
486 if (IEM_IS_64BIT_CODE(pVCpu))
487 {
488 cbToTryRead = GUEST_PAGE_SIZE;
489 GCPtrPC = pVCpu->cpum.GstCtx.rip;
490 if (IEM_IS_CANONICAL(GCPtrPC))
491 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
492 else
493 return iemRaiseGeneralProtectionFault0(pVCpu);
494 }
495 else
496 {
497 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
498 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
499 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
500 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
501 else
502 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
503 if (cbToTryRead) { /* likely */ }
504 else /* overflowed */
505 {
506 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
507 cbToTryRead = UINT32_MAX;
508 }
509 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
510 Assert(GCPtrPC <= UINT32_MAX);
511 }
512
513 PGMPTWALK Walk;
514 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
515 if (RT_SUCCESS(rc))
516 Assert(Walk.fSucceeded); /* probable. */
517 else
518 {
519 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
520# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
521 if (Walk.fFailed & PGM_WALKFAIL_EPT)
522 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
523# endif
524 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
525 }
526 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
527 else
528 {
529 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
530# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
531 if (Walk.fFailed & PGM_WALKFAIL_EPT)
532 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
533# endif
534 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
535 }
536 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
537 else
538 {
539 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
540# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
541 if (Walk.fFailed & PGM_WALKFAIL_EPT)
542 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
543# endif
544 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
545 }
546 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
547 /** @todo Check reserved bits and such stuff. PGM is better at doing
548 * that, so do it when implementing the guest virtual address
549 * TLB... */
550
551 /*
552 * Read the bytes at this address.
553 */
554 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
555 if (cbToTryRead > cbLeftOnPage)
556 cbToTryRead = cbLeftOnPage;
557 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
558 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
559
560 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
561 {
562 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
563 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
564 { /* likely */ }
565 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
566 {
567 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
568 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
569 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
570 }
571 else
572 {
573 Log((RT_SUCCESS(rcStrict)
574 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
575 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
576 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
577 return rcStrict;
578 }
579 }
580 else
581 {
582 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
583 if (RT_SUCCESS(rc))
584 { /* likely */ }
585 else
586 {
587 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
588 GCPtrPC, GCPhys, cbToTryRead, rc));
589 return rc;
590 }
591 }
592 pVCpu->iem.s.cbOpcode = cbToTryRead;
593#endif /* !IEM_WITH_CODE_TLB */
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * Invalidates the IEM TLBs.
600 *
601 * This is called internally as well as by PGM when moving GC mappings.
602 *
603 * @param pVCpu The cross context virtual CPU structure of the calling
604 * thread.
605 */
606VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
607{
608#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
609 Log10(("IEMTlbInvalidateAll\n"));
610# ifdef IEM_WITH_CODE_TLB
611 pVCpu->iem.s.cbInstrBufTotal = 0;
612 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
613 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
614 { /* very likely */ }
615 else
616 {
617 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
618 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
619 while (i-- > 0)
620 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
621 }
622# endif
623
624# ifdef IEM_WITH_DATA_TLB
625 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
626 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
627 { /* very likely */ }
628 else
629 {
630 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
631 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
632 while (i-- > 0)
633 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
634 }
635# endif
636#else
637 RT_NOREF(pVCpu);
638#endif
639}
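/*
 * Simplified standalone sketch (plain C99 with <stdint.h>/<stdbool.h>, not the
 * real IEMTLB types) of the lazy invalidation scheme used above: every entry
 * tag is stored with the revision that was current when the entry was filled
 * OR'ed in, so bumping the revision makes all existing tags mismatch without
 * touching the array; only when the revision counter wraps to zero do the tags
 * need clearing the slow way.
 *
 *      typedef struct TINYTLB
 *      {
 *          uint64_t uRevision;
 *          struct { uint64_t uTag; } aEntries[256];
 *      } TINYTLB;
 *
 *      // Hit test: the stored tag must match under the *current* revision.
 *      static bool tinyTlbIsHit(TINYTLB const *pTlb, uint64_t uPageTag, unsigned idx)
 *      {
 *          return pTlb->aEntries[idx].uTag == (uPageTag | pTlb->uRevision);
 *      }
 *
 *      // Invalidate-all: O(1) normally, O(n) only on revision wrap-around.
 *      static void tinyTlbInvalidateAll(TINYTLB *pTlb, uint64_t uRevisionIncr)
 *      {
 *          pTlb->uRevision += uRevisionIncr;
 *          if (!pTlb->uRevision)
 *          {
 *              pTlb->uRevision = uRevisionIncr;
 *              for (unsigned i = 0; i < 256; i++)
 *                  pTlb->aEntries[i].uTag = 0;
 *          }
 *      }
 */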
640
641
642/**
643 * Invalidates a page in the TLBs.
644 *
645 * @param pVCpu The cross context virtual CPU structure of the calling
646 * thread.
647 * @param GCPtr The address of the page to invalidate
648 * @thread EMT(pVCpu)
649 */
650VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
651{
652#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
653 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
654 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
655 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
656 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
657
658# ifdef IEM_WITH_CODE_TLB
659 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
660 {
661 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
662 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
663 pVCpu->iem.s.cbInstrBufTotal = 0;
664 }
665# endif
666
667# ifdef IEM_WITH_DATA_TLB
668 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
669 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
670# endif
671#else
672 NOREF(pVCpu); NOREF(GCPtr);
673#endif
674}
675
676
677#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
678/**
679 * Invalidates both TLBs in slow fashion following a rollover.
680 *
681 * Worker for IEMTlbInvalidateAllPhysical,
682 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
683 * iemMemMapJmp and others.
684 *
685 * @thread EMT(pVCpu)
686 */
687static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
688{
689 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
690 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
692
693 unsigned i;
694# ifdef IEM_WITH_CODE_TLB
695 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
696 while (i-- > 0)
697 {
698 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
699 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
700 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
701 }
702# endif
703# ifdef IEM_WITH_DATA_TLB
704 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
705 while (i-- > 0)
706 {
707 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
708 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
709 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
710 }
711# endif
712
713}
714#endif
715
716
717/**
718 * Invalidates the host physical aspects of the IEM TLBs.
719 *
720 * This is called internally as well as by PGM when moving GC mappings.
721 *
722 * @param pVCpu The cross context virtual CPU structure of the calling
723 * thread.
724 * @note Currently not used.
725 */
726VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
727{
728#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
729 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
730 Log10(("IEMTlbInvalidateAllPhysical\n"));
731
732# ifdef IEM_WITH_CODE_TLB
733 pVCpu->iem.s.cbInstrBufTotal = 0;
734# endif
735 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
736 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
737 {
738 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
739 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
740 }
741 else
742 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
743#else
744 NOREF(pVCpu);
745#endif
746}
747
748
749/**
750 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
751 *
752 * This is called internally as well as by PGM when moving GC mappings.
753 *
754 * @param pVM The cross context VM structure.
755 * @param idCpuCaller The ID of the calling EMT if available to the caller,
756 * otherwise NIL_VMCPUID.
757 * @param enmReason The reason we're called.
758 *
759 * @remarks Caller holds the PGM lock.
760 */
761VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
762{
763#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
764 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
765 if (pVCpuCaller)
766 VMCPU_ASSERT_EMT(pVCpuCaller);
767 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
768
769 VMCC_FOR_EACH_VMCPU(pVM)
770 {
771# ifdef IEM_WITH_CODE_TLB
772 if (pVCpuCaller == pVCpu)
773 pVCpu->iem.s.cbInstrBufTotal = 0;
774# endif
775
776 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
777 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
778 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
779 { /* likely */}
780 else if (pVCpuCaller != pVCpu)
781 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
782 else
783 {
784 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
785 continue;
786 }
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
789 }
790 VMCC_FOR_EACH_VMCPU_END(pVM);
791
792#else
793 RT_NOREF(pVM, idCpuCaller, enmReason);
794#endif
795}
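/*
 * The cross-vCPU revision bump above deliberately uses compare-and-swap rather
 * than a plain store, so a concurrent bump performed by the owning EMT is never
 * clobbered with a stale value. A minimal sketch of that idiom using the same
 * iprt/asm.h helpers (the function itself is illustrative, not part of IEM):
 *
 *      static void exampleBumpPhysRev(volatile uint64_t *puPhysRev, uint64_t uIncr)
 *      {
 *          uint64_t const uOld = ASMAtomicUoReadU64(puPhysRev);
 *          uint64_t const uNew = uOld + uIncr;
 *          // Only installs uNew if *puPhysRev still equals uOld; if it doesn't,
 *          // someone else already bumped it and that bump is just as good.
 *          ASMAtomicCmpXchgU64(puPhysRev, uNew, uOld);
 *      }
 */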
796
797
798/**
799 * Flushes the prefetch buffer, light version.
800 */
801void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
802{
803#ifndef IEM_WITH_CODE_TLB
804 pVCpu->iem.s.cbOpcode = cbInstr;
805#else
806 RT_NOREF(pVCpu, cbInstr);
807#endif
808}
809
810
811/**
812 * Flushes the prefetch buffer, heavy version.
813 */
814void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
815{
816#ifndef IEM_WITH_CODE_TLB
817 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
818#elif 1
819 pVCpu->iem.s.cbInstrBufTotal = 0;
820 RT_NOREF(cbInstr);
821#else
822 RT_NOREF(pVCpu, cbInstr);
823#endif
824}
825
826
827
828#ifdef IEM_WITH_CODE_TLB
829
830/**
831 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
832 * failure and jumping.
833 *
834 * We end up here for a number of reasons:
835 * - pbInstrBuf isn't yet initialized.
836 * - Advancing beyond the buffer boundary (e.g. cross page).
837 * - Advancing beyond the CS segment limit.
838 * - Fetching from non-mappable page (e.g. MMIO).
839 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
840 *
841 * @param pVCpu The cross context virtual CPU structure of the
842 * calling thread.
843 * @param pvDst Where to return the bytes.
844 * @param cbDst Number of bytes to read. A value of zero is
845 * allowed for initializing pbInstrBuf (the
846 * recompiler does this). In this case it is best
847 * to set pbInstrBuf to NULL prior to the call.
848 */
849void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
850{
851# ifdef IN_RING3
852 for (;;)
853 {
854 Assert(cbDst <= 8);
855 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
856
857 /*
858 * We might have a partial buffer match, deal with that first to make the
859 * rest simpler. This is the first part of the cross page/buffer case.
860 */
861 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
862 if (pbInstrBuf != NULL)
863 {
864 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
865 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
866 if (offBuf < cbInstrBuf)
867 {
868 Assert(offBuf + cbDst > cbInstrBuf);
869 uint32_t const cbCopy = cbInstrBuf - offBuf;
870 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
871
872 cbDst -= cbCopy;
873 pvDst = (uint8_t *)pvDst + cbCopy;
874 offBuf += cbCopy;
875 }
876 }
877
878 /*
879 * Check segment limit, figuring how much we're allowed to access at this point.
880 *
881 * We will fault immediately if RIP is past the segment limit / in non-canonical
882 * territory. If we do continue, there are one or more bytes to read before we
883 * end up in trouble and we need to do that first before faulting.
884 */
885 RTGCPTR GCPtrFirst;
886 uint32_t cbMaxRead;
887 if (IEM_IS_64BIT_CODE(pVCpu))
888 {
889 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
890 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
891 { /* likely */ }
892 else
893 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
894 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
895 }
896 else
897 {
898 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
899 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
900 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
901 { /* likely */ }
902 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
903 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
904 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
905 if (cbMaxRead != 0)
906 { /* likely */ }
907 else
908 {
909 /* Overflowed because address is 0 and limit is max. */
910 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
911 cbMaxRead = X86_PAGE_SIZE;
912 }
913 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
914 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
915 if (cbMaxRead2 < cbMaxRead)
916 cbMaxRead = cbMaxRead2;
917 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
918 }
919
920 /*
921 * Get the TLB entry for this piece of code.
922 */
923 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
924 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
925 if (pTlbe->uTag == uTag)
926 {
927 /* likely when executing lots of code, otherwise unlikely */
928# ifdef VBOX_WITH_STATISTICS
929 pVCpu->iem.s.CodeTlb.cTlbHits++;
930# endif
931 }
932 else
933 {
934 pVCpu->iem.s.CodeTlb.cTlbMisses++;
935 PGMPTWALK Walk;
936 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
937 if (RT_FAILURE(rc))
938 {
939#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
940 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
941 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
942#endif
943 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
944 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
945 }
946
947 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
948 Assert(Walk.fSucceeded);
949 pTlbe->uTag = uTag;
950 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
951 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
952 pTlbe->GCPhys = Walk.GCPhys;
953 pTlbe->pbMappingR3 = NULL;
954 }
955
956 /*
957 * Check TLB page table level access flags.
958 */
959 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
960 {
961 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
962 {
963 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
964 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
965 }
966 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
967 {
968 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
969 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
970 }
971 }
972
973 /*
974 * Set the accessed flags.
975 * ASSUMES this is set when the address is translated rather than on commit...
976 */
977 /** @todo testcase: check when the A bit is actually set by the CPU for code. */
978 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
979 {
980 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
981 AssertRC(rc2);
982 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
983 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
984 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
985 }
986
987 /*
988 * Look up the physical page info if necessary.
989 */
990 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
991 { /* not necessary */ }
992 else
993 {
994 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
995 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
996 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
997 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
998 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
999 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1000 { /* likely */ }
1001 else
1002 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1003 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1004 | IEMTLBE_F_NO_MAPPINGR3
1005 | IEMTLBE_F_PG_NO_READ
1006 | IEMTLBE_F_PG_NO_WRITE
1007 | IEMTLBE_F_PG_UNASSIGNED
1008 | IEMTLBE_F_PG_CODE_PAGE);
1009 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1010 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1011 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1012 }
1013
1014# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1015 /*
1016 * Try do a direct read using the pbMappingR3 pointer.
1017 */
1018 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1019 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1020 {
1021 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1022 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1023 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1024 {
1025 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1026 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1027 }
1028 else
1029 {
1030 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1031 if (cbInstr + (uint32_t)cbDst <= 15)
1032 {
1033 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1034 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1035 }
1036 else
1037 {
1038 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1039 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1040 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1041 }
1042 }
1043 if (cbDst <= cbMaxRead)
1044 {
1045 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1046 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1047
1048 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1049 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1050 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1051 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1052 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1054 else
1055 Assert(!pvDst);
1056 return;
1057 }
1058 pVCpu->iem.s.pbInstrBuf = NULL;
1059
1060 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1061 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1062 }
1063# else
1064# error "refactor as needed"
1065 /*
1066 * If there is no special read handling, so we can read a bit more and
1067 * put it in the prefetch buffer.
1068 */
1069 if ( cbDst < cbMaxRead
1070 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1071 {
1072 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1073 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1074 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1075 { /* likely */ }
1076 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1077 {
1078 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1079 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1080 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1081 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1082 }
1083 else
1084 {
1085 Log((RT_SUCCESS(rcStrict)
1086 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1087 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1088 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1089 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1090 }
1091 }
1092# endif
1093 /*
1094 * Special read handling, so only read exactly what's needed.
1095 * This is a highly unlikely scenario.
1096 */
1097 else
1098 {
1099 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1100
1101 /* Check instruction length. */
1102 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1103 if (RT_LIKELY(cbInstr + cbDst <= 15))
1104 { /* likely */ }
1105 else
1106 {
1107 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1108 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1109 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1110 }
1111
1112 /* Do the reading. */
1113 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1114 if (cbToRead > 0)
1115 {
1116 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1117 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1118 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1119 { /* likely */ }
1120 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1121 {
1122 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1123 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1124 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1125 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1126 }
1127 else
1128 {
1129 Log((RT_SUCCESS(rcStrict)
1130 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1131 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1132 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1133 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1134 }
1135 }
1136
1137 /* Update the state and probably return. */
1138 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1139 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1140 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1141
1142 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1143 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1144 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1145 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1146 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1147 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1148 pVCpu->iem.s.pbInstrBuf = NULL;
1149 if (cbToRead == cbDst)
1150 return;
1151 Assert(cbToRead == cbMaxRead);
1152 }
1153
1154 /*
1155 * More to read, loop.
1156 */
1157 cbDst -= cbMaxRead;
1158 pvDst = (uint8_t *)pvDst + cbMaxRead;
1159 }
1160# else /* !IN_RING3 */
1161 RT_NOREF(pvDst, cbDst);
1162 if (pvDst || cbDst)
1163 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1164# endif /* !IN_RING3 */
1165}
1166
1167#else /* !IEM_WITH_CODE_TLB */
1168
1169/**
1170 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1171 * exception if it fails.
1172 *
1173 * @returns Strict VBox status code.
1174 * @param pVCpu The cross context virtual CPU structure of the
1175 * calling thread.
1176 * @param cbMin The minimum number of bytes relative to offOpcode
1177 * that must be read.
1178 */
1179VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1180{
1181 /*
1182 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1183 *
1184 * First translate CS:rIP to a physical address.
1185 */
1186 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1187 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1188 uint8_t const cbLeft = cbOpcode - offOpcode;
1189 Assert(cbLeft < cbMin);
1190 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1191
1192 uint32_t cbToTryRead;
1193 RTGCPTR GCPtrNext;
1194 if (IEM_IS_64BIT_CODE(pVCpu))
1195 {
1196 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1197 if (!IEM_IS_CANONICAL(GCPtrNext))
1198 return iemRaiseGeneralProtectionFault0(pVCpu);
1199 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1200 }
1201 else
1202 {
1203 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1204 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1205 GCPtrNext32 += cbOpcode;
1206 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1207 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1208 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1209 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1210 if (!cbToTryRead) /* overflowed */
1211 {
1212 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1213 cbToTryRead = UINT32_MAX;
1214 /** @todo check out wrapping around the code segment. */
1215 }
1216 if (cbToTryRead < cbMin - cbLeft)
1217 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1218 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1219
1220 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1221 if (cbToTryRead > cbLeftOnPage)
1222 cbToTryRead = cbLeftOnPage;
1223 }
1224
1225 /* Restrict to opcode buffer space.
1226
1227 We're making ASSUMPTIONS here based on work done previously in
1228 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1229 be fetched in case of an instruction crossing two pages. */
1230 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1231 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1232 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1233 { /* likely */ }
1234 else
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1237 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1238 return iemRaiseGeneralProtectionFault0(pVCpu);
1239 }
1240
1241 PGMPTWALK Walk;
1242 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1243 if (RT_FAILURE(rc))
1244 {
1245 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1246#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1247 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1248 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1249#endif
1250 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1251 }
1252 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1253 {
1254 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1255#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1256 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1257 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1258#endif
1259 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1260 }
1261 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1262 {
1263 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1264#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1265 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1266 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1267#endif
1268 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1269 }
1270 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1271 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1272 /** @todo Check reserved bits and such stuff. PGM is better at doing
1273 * that, so do it when implementing the guest virtual address
1274 * TLB... */
1275
1276 /*
1277 * Read the bytes at this address.
1278 *
1279 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1280 * and since PATM should only patch the start of an instruction there
1281 * should be no need to check again here.
1282 */
1283 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1284 {
1285 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1286 cbToTryRead, PGMACCESSORIGIN_IEM);
1287 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1288 { /* likely */ }
1289 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1290 {
1291 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1294 }
1295 else
1296 {
1297 Log((RT_SUCCESS(rcStrict)
1298 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1299 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1300 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1301 return rcStrict;
1302 }
1303 }
1304 else
1305 {
1306 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1307 if (RT_SUCCESS(rc))
1308 { /* likely */ }
1309 else
1310 {
1311 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1312 return rc;
1313 }
1314 }
1315 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1316 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1317
1318 return VINF_SUCCESS;
1319}
1320
1321#endif /* !IEM_WITH_CODE_TLB */
1322#ifndef IEM_WITH_SETJMP
1323
1324/**
1325 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1326 *
1327 * @returns Strict VBox status code.
1328 * @param pVCpu The cross context virtual CPU structure of the
1329 * calling thread.
1330 * @param pb Where to return the opcode byte.
1331 */
1332VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1333{
1334 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1335 if (rcStrict == VINF_SUCCESS)
1336 {
1337 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1338 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1339 pVCpu->iem.s.offOpcode = offOpcode + 1;
1340 }
1341 else
1342 *pb = 0;
1343 return rcStrict;
1344}
1345
1346#else /* IEM_WITH_SETJMP */
1347
1348/**
1349 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1350 *
1351 * @returns The opcode byte.
1352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1353 */
1354uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1355{
1356# ifdef IEM_WITH_CODE_TLB
1357 uint8_t u8;
1358 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1359 return u8;
1360# else
1361 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1362 if (rcStrict == VINF_SUCCESS)
1363 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1364 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1365# endif
1366}
1367
1368#endif /* IEM_WITH_SETJMP */
1369
1370#ifndef IEM_WITH_SETJMP
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1377 * @param pu16 Where to return the opcode word.
1378 */
1379VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1380{
1381 uint8_t u8;
1382 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1383 if (rcStrict == VINF_SUCCESS)
1384 *pu16 = (int8_t)u8;
1385 return rcStrict;
1386}
1387
1388
1389/**
1390 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1391 *
1392 * @returns Strict VBox status code.
1393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1394 * @param pu32 Where to return the opcode dword.
1395 */
1396VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1397{
1398 uint8_t u8;
1399 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1400 if (rcStrict == VINF_SUCCESS)
1401 *pu32 = (int8_t)u8;
1402 return rcStrict;
1403}
1404
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 * @param pu64 Where to return the opcode qword.
1412 */
1413VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1414{
1415 uint8_t u8;
1416 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1417 if (rcStrict == VINF_SUCCESS)
1418 *pu64 = (int8_t)u8;
1419 return rcStrict;
1420}
1421
1422#endif /* !IEM_WITH_SETJMP */
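/*
 * Note on the (int8_t) casts in the S8Sx helpers above: assigning the signed
 * byte to a wider unsigned destination sign-extends per the standard C integer
 * conversions. A self-contained illustration with a made-up opcode byte:
 *
 *      uint8_t  const u8  = 0xFE;        // the fetched byte, -2 when signed
 *      uint16_t const u16 = (int8_t)u8;  // 0xFFFE
 *      uint32_t const u32 = (int8_t)u8;  // 0xFFFFFFFE
 *      uint64_t const u64 = (int8_t)u8;  // 0xFFFFFFFFFFFFFFFE
 */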
1423
1424
1425#ifndef IEM_WITH_SETJMP
1426
1427/**
1428 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1429 *
1430 * @returns Strict VBox status code.
1431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1432 * @param pu16 Where to return the opcode word.
1433 */
1434VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1435{
1436 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1437 if (rcStrict == VINF_SUCCESS)
1438 {
1439 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1440# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1441 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1442# else
1443 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1444# endif
1445 pVCpu->iem.s.offOpcode = offOpcode + 2;
1446 }
1447 else
1448 *pu16 = 0;
1449 return rcStrict;
1450}
1451
1452#else /* IEM_WITH_SETJMP */
1453
1454/**
1455 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1456 *
1457 * @returns The opcode word.
1458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1459 */
1460uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1461{
1462# ifdef IEM_WITH_CODE_TLB
1463 uint16_t u16;
1464 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1465 return u16;
1466# else
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1471 pVCpu->iem.s.offOpcode += 2;
1472# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1473 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1474# else
1475 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1476# endif
1477 }
1478 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1479# endif
1480}
1481
1482#endif /* IEM_WITH_SETJMP */
1483
1484#ifndef IEM_WITH_SETJMP
1485
1486/**
1487 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1488 *
1489 * @returns Strict VBox status code.
1490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1491 * @param pu32 Where to return the opcode double word.
1492 */
1493VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1494{
1495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1496 if (rcStrict == VINF_SUCCESS)
1497 {
1498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1499 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1500 pVCpu->iem.s.offOpcode = offOpcode + 2;
1501 }
1502 else
1503 *pu32 = 0;
1504 return rcStrict;
1505}
1506
1507
1508/**
1509 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1510 *
1511 * @returns Strict VBox status code.
1512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1513 * @param pu64 Where to return the opcode quad word.
1514 */
1515VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1516{
1517 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1518 if (rcStrict == VINF_SUCCESS)
1519 {
1520 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1521 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1522 pVCpu->iem.s.offOpcode = offOpcode + 2;
1523 }
1524 else
1525 *pu64 = 0;
1526 return rcStrict;
1527}
1528
1529#endif /* !IEM_WITH_SETJMP */
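/*
 * The RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 compositions above assemble the
 * little-endian opcode stream with the first byte as the least significant
 * one. A quick illustration with made-up opcode bytes (assuming the usual
 * low-to-high argument order of these iprt/cdefs.h macros):
 *
 *      uint8_t const abBytes[4] = { 0x34, 0x12, 0xcd, 0xab };
 *      uint16_t const u16 = RT_MAKE_U16(abBytes[0], abBytes[1]);          // 0x1234
 *      uint32_t const u32 = RT_MAKE_U32_FROM_U8(abBytes[0], abBytes[1],
 *                                               abBytes[2], abBytes[3]);  // 0xabcd1234
 */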
1530
1531#ifndef IEM_WITH_SETJMP
1532
1533/**
1534 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1535 *
1536 * @returns Strict VBox status code.
1537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1538 * @param pu32 Where to return the opcode dword.
1539 */
1540VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1541{
1542 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1543 if (rcStrict == VINF_SUCCESS)
1544 {
1545 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1546# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1547 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1548# else
1549 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1550 pVCpu->iem.s.abOpcode[offOpcode + 1],
1551 pVCpu->iem.s.abOpcode[offOpcode + 2],
1552 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1553# endif
1554 pVCpu->iem.s.offOpcode = offOpcode + 4;
1555 }
1556 else
1557 *pu32 = 0;
1558 return rcStrict;
1559}
1560
1561#else /* IEM_WITH_SETJMP */
1562
1563/**
1564 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1565 *
1566 * @returns The opcode dword.
1567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1568 */
1569uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1570{
1571# ifdef IEM_WITH_CODE_TLB
1572 uint32_t u32;
1573 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1574 return u32;
1575# else
1576 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1577 if (rcStrict == VINF_SUCCESS)
1578 {
1579 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1580 pVCpu->iem.s.offOpcode = offOpcode + 4;
1581# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1582 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1583# else
1584 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1585 pVCpu->iem.s.abOpcode[offOpcode + 1],
1586 pVCpu->iem.s.abOpcode[offOpcode + 2],
1587 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1588# endif
1589 }
1590 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1591# endif
1592}
1593
1594#endif /* IEM_WITH_SETJMP */
1595
1596#ifndef IEM_WITH_SETJMP
1597
1598/**
1599 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1600 *
1601 * @returns Strict VBox status code.
1602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1603 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
1604 */
1605VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1606{
1607 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1608 if (rcStrict == VINF_SUCCESS)
1609 {
1610 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1611 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1612 pVCpu->iem.s.abOpcode[offOpcode + 1],
1613 pVCpu->iem.s.abOpcode[offOpcode + 2],
1614 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1615 pVCpu->iem.s.offOpcode = offOpcode + 4;
1616 }
1617 else
1618 *pu64 = 0;
1619 return rcStrict;
1620}
1621
1622
1623/**
1624 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1625 *
1626 * @returns Strict VBox status code.
1627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1628 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
1629 */
1630VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1631{
1632 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1633 if (rcStrict == VINF_SUCCESS)
1634 {
1635 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1636 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1637 pVCpu->iem.s.abOpcode[offOpcode + 1],
1638 pVCpu->iem.s.abOpcode[offOpcode + 2],
1639 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1640 pVCpu->iem.s.offOpcode = offOpcode + 4;
1641 }
1642 else
1643 *pu64 = 0;
1644 return rcStrict;
1645}
1646
1647#endif /* !IEM_WITH_SETJMP */
1648
1649#ifndef IEM_WITH_SETJMP
1650
1651/**
1652 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1653 *
1654 * @returns Strict VBox status code.
1655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1656 * @param pu64 Where to return the opcode qword.
1657 */
1658VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1659{
1660 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1661 if (rcStrict == VINF_SUCCESS)
1662 {
1663 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1664# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1665 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1666# else
1667 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1668 pVCpu->iem.s.abOpcode[offOpcode + 1],
1669 pVCpu->iem.s.abOpcode[offOpcode + 2],
1670 pVCpu->iem.s.abOpcode[offOpcode + 3],
1671 pVCpu->iem.s.abOpcode[offOpcode + 4],
1672 pVCpu->iem.s.abOpcode[offOpcode + 5],
1673 pVCpu->iem.s.abOpcode[offOpcode + 6],
1674 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1675# endif
1676 pVCpu->iem.s.offOpcode = offOpcode + 8;
1677 }
1678 else
1679 *pu64 = 0;
1680 return rcStrict;
1681}
1682
1683#else /* IEM_WITH_SETJMP */
1684
1685/**
1686 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1687 *
1688 * @returns The opcode qword.
1689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1690 */
1691uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1692{
1693# ifdef IEM_WITH_CODE_TLB
1694 uint64_t u64;
1695 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1696 return u64;
1697# else
1698 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1699 if (rcStrict == VINF_SUCCESS)
1700 {
1701 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1702 pVCpu->iem.s.offOpcode = offOpcode + 8;
1703# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1704 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1705# else
1706 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1707 pVCpu->iem.s.abOpcode[offOpcode + 1],
1708 pVCpu->iem.s.abOpcode[offOpcode + 2],
1709 pVCpu->iem.s.abOpcode[offOpcode + 3],
1710 pVCpu->iem.s.abOpcode[offOpcode + 4],
1711 pVCpu->iem.s.abOpcode[offOpcode + 5],
1712 pVCpu->iem.s.abOpcode[offOpcode + 6],
1713 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1714# endif
1715 }
1716 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1717# endif
1718}
1719
1720#endif /* IEM_WITH_SETJMP */
1721
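/* Illustrative note (a sketch, not part of the build): the slow-path fetchers
 * above come in two flavours selected at compile time.  Without IEM_WITH_SETJMP
 * the caller receives a VBOXSTRICTRC it must check; with IEM_WITH_SETJMP the
 * helper longjmps on failure and simply returns the value.  The hypothetical
 * caller below (iemExampleFetchImmU32 is a made-up name) shows the status-code
 * convention, using only iemOpcodeGetNextU32Slow() as defined above. */
#if 0 /* documentation sketch only, never compiled */
static VBOXSTRICTRC iemExampleFetchImmU32(PVMCPUCC pVCpu, uint32_t *pu32Imm)
{
    /* Fetches 4 more opcode bytes and assembles them into a dword (x86 immediates are little endian). */
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU32Slow(pVCpu, pu32Imm);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* e.g. a fault raised while fetching code. */
    return VINF_SUCCESS;
}
#endif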
1722
1723
1724/** @name Misc Worker Functions.
1725 * @{
1726 */
1727
1728/**
1729 * Gets the exception class for the specified exception vector.
1730 *
1731 * @returns The class of the specified exception.
1732 * @param uVector The exception vector.
1733 */
1734static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1735{
1736 Assert(uVector <= X86_XCPT_LAST);
1737 switch (uVector)
1738 {
1739 case X86_XCPT_DE:
1740 case X86_XCPT_TS:
1741 case X86_XCPT_NP:
1742 case X86_XCPT_SS:
1743 case X86_XCPT_GP:
1744 case X86_XCPT_SX: /* AMD only */
1745 return IEMXCPTCLASS_CONTRIBUTORY;
1746
1747 case X86_XCPT_PF:
1748 case X86_XCPT_VE: /* Intel only */
1749 return IEMXCPTCLASS_PAGE_FAULT;
1750
1751 case X86_XCPT_DF:
1752 return IEMXCPTCLASS_DOUBLE_FAULT;
1753 }
1754 return IEMXCPTCLASS_BENIGN;
1755}
1756
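/* Quick orientation (summarising the classification above and Intel SDM Vol. 3,
 * Table 6-5 "Conditions for Generating a Double Fault"):
 *    - first or second event benign                      -> deliver the second event normally
 *    - contributory followed by contributory             -> #DF
 *    - page fault followed by page fault or contributory -> #DF
 *    - #DF delivery followed by contributory/page fault  -> triple fault (shutdown)
 * IEMEvaluateRecursiveXcpt() below implements this matrix plus a few NMI, #AC
 * and external-interrupt special cases. */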
1757
1758/**
1759 * Evaluates how to handle an exception caused during delivery of another event
1760 * (exception / interrupt).
1761 *
1762 * @returns How to handle the recursive exception.
1763 * @param pVCpu The cross context virtual CPU structure of the
1764 * calling thread.
1765 * @param fPrevFlags The flags of the previous event.
1766 * @param uPrevVector The vector of the previous event.
1767 * @param fCurFlags The flags of the current exception.
1768 * @param uCurVector The vector of the current exception.
1769 * @param pfXcptRaiseInfo Where to store additional information about the
1770 * exception condition. Optional.
1771 */
1772VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1773 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1774{
1775 /*
1776 * Only CPU exceptions can be raised while delivering other events, software interrupt
1777 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1778 */
1779 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1780 Assert(pVCpu); RT_NOREF(pVCpu);
1781 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1782
1783 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1784 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1785 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1786 {
1787 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1788 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1789 {
1790 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1791 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1792 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1793 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1794 {
1795 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1796 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1797 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1798 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1799 uCurVector, pVCpu->cpum.GstCtx.cr2));
1800 }
1801 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1802 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1803 {
1804 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1805 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1806 }
1807 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1808 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1809 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1810 {
1811 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1812 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1813 }
1814 }
1815 else
1816 {
1817 if (uPrevVector == X86_XCPT_NMI)
1818 {
1819 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1820 if (uCurVector == X86_XCPT_PF)
1821 {
1822 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1823 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1824 }
1825 }
1826 else if ( uPrevVector == X86_XCPT_AC
1827 && uCurVector == X86_XCPT_AC)
1828 {
1829 enmRaise = IEMXCPTRAISE_CPU_HANG;
1830 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1831 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1832 }
1833 }
1834 }
1835 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1836 {
1837 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1838 if (uCurVector == X86_XCPT_PF)
1839 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1840 }
1841 else
1842 {
1843 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1844 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1845 }
1846
1847 if (pfXcptRaiseInfo)
1848 *pfXcptRaiseInfo = fRaiseInfo;
1849 return enmRaise;
1850}
1851
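/* Hypothetical usage sketch (not from the original sources): an exit handler
 * that caught exception uNewVector while uInjectedVector was being delivered
 * could consult the evaluator like this.  The function name
 * exampleHandleVectoringXcpt is made up; everything else is declared above. */
#if 0 /* documentation sketch only, never compiled */
static void exampleHandleVectoringXcpt(PVMCPUCC pVCpu, uint8_t uInjectedVector, uint8_t uNewVector)
{
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, uInjectedVector,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, uNewVector,
                                                           &fRaiseInfo);
    switch (enmRaise)
    {
        case IEMXCPTRAISE_CURRENT_XCPT: /* deliver uNewVector as-is */                break;
        case IEMXCPTRAISE_DOUBLE_FAULT: /* inject #DF instead */                      break;
        case IEMXCPTRAISE_TRIPLE_FAULT: /* enter shutdown / reset the VCPU */         break;
        case IEMXCPTRAISE_CPU_HANG:     /* recursive #AC: hang like real hardware */  break;
        default:                        /* IEMXCPTRAISE_INVALID and other values */   break;
    }
    RT_NOREF(fRaiseInfo);
}
#endif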
1852
1853/**
1854 * Enters the CPU shutdown state initiated by a triple fault or other
1855 * unrecoverable conditions.
1856 *
1857 * @returns Strict VBox status code.
1858 * @param pVCpu The cross context virtual CPU structure of the
1859 * calling thread.
1860 */
1861static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1862{
1863 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1864 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1865
1866 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1867 {
1868 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1869 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1870 }
1871
1872 RT_NOREF(pVCpu);
1873 return VINF_EM_TRIPLE_FAULT;
1874}
1875
1876
1877/**
1878 * Validates a new SS segment.
1879 *
1880 * @returns VBox strict status code.
1881 * @param pVCpu The cross context virtual CPU structure of the
1882 * calling thread.
1883 * @param NewSS The new SS selector.
1884 * @param uCpl The CPL to load the stack for.
1885 * @param pDesc Where to return the descriptor.
1886 */
1887static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1888{
1889 /* Null selectors are not allowed (we're not called for dispatching
1890 interrupts with SS=0 in long mode). */
1891 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1892 {
1893 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1894 return iemRaiseTaskSwitchFault0(pVCpu);
1895 }
1896
1897 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1898 if ((NewSS & X86_SEL_RPL) != uCpl)
1899 {
1900 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1901 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1902 }
1903
1904 /*
1905 * Read the descriptor.
1906 */
1907 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1908 if (rcStrict != VINF_SUCCESS)
1909 return rcStrict;
1910
1911 /*
1912 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1913 */
1914 if (!pDesc->Legacy.Gen.u1DescType)
1915 {
1916 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1917 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1918 }
1919
1920 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1921 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1922 {
1923 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1924 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1925 }
1926 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1929 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1930 }
1931
1932 /* Is it there? */
1933 /** @todo testcase: Is this checked before the canonical / limit check below? */
1934 if (!pDesc->Legacy.Gen.u1Present)
1935 {
1936 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1937 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1938 }
1939
1940 return VINF_SUCCESS;
1941}
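/* Summary (restating the checks above): the SS validation follows the LSS,
 * POP SS and MOV SS rules - non-null selector, RPL == CPL, a writable data
 * segment, DPL == CPL, and finally the present bit - raising #TS for the
 * first four failures and #NP for the last, each with the offending selector
 * as error code. */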
1942
1943/** @} */
1944
1945
1946/** @name Raising Exceptions.
1947 *
1948 * @{
1949 */
1950
1951
1952/**
1953 * Loads the specified stack far pointer from the TSS.
1954 *
1955 * @returns VBox strict status code.
1956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1957 * @param uCpl The CPL to load the stack for.
1958 * @param pSelSS Where to return the new stack segment.
1959 * @param puEsp Where to return the new stack pointer.
1960 */
1961static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1962{
1963 VBOXSTRICTRC rcStrict;
1964 Assert(uCpl < 4);
1965
1966 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1967 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1968 {
1969 /*
1970 * 16-bit TSS (X86TSS16).
1971 */
1972 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1973 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1974 {
1975 uint32_t off = uCpl * 4 + 2;
1976 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1977 {
1978 /** @todo check actual access pattern here. */
1979 uint32_t u32Tmp = 0; /* gcc maybe... */
1980 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1981 if (rcStrict == VINF_SUCCESS)
1982 {
1983 *puEsp = RT_LOWORD(u32Tmp);
1984 *pSelSS = RT_HIWORD(u32Tmp);
1985 return VINF_SUCCESS;
1986 }
1987 }
1988 else
1989 {
1990 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1991 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1992 }
1993 break;
1994 }
1995
1996 /*
1997 * 32-bit TSS (X86TSS32).
1998 */
1999 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2000 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2001 {
2002 uint32_t off = uCpl * 8 + 4;
2003 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2004 {
2005 /** @todo check actual access pattern here. */
2006 uint64_t u64Tmp;
2007 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2008 if (rcStrict == VINF_SUCCESS)
2009 {
2010 *puEsp = u64Tmp & UINT32_MAX;
2011 *pSelSS = (RTSEL)(u64Tmp >> 32);
2012 return VINF_SUCCESS;
2013 }
2014 }
2015 else
2016 {
2017 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2018 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2019 }
2020 break;
2021 }
2022
2023 default:
2024 AssertFailed();
2025 rcStrict = VERR_IEM_IPE_4;
2026 break;
2027 }
2028
2029 *puEsp = 0; /* make gcc happy */
2030 *pSelSS = 0; /* make gcc happy */
2031 return rcStrict;
2032}
2033
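/* Layout note: the offsets computed above index the per-ring stack fields of
 * the legacy TSS formats.  A 16-bit TSS stores {sp,ss} word pairs starting at
 * offset 2 (hence off = uCpl * 4 + 2), while a 32-bit TSS stores
 * {esp, ss + padding} 8-byte pairs starting at offset 4 (hence
 * off = uCpl * 8 + 4); the fetched 32/64-bit value is then split into the
 * stack pointer (low part) and the SS selector (high part) as done above. */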
2034
2035/**
2036 * Loads the specified stack pointer from the 64-bit TSS.
2037 *
2038 * @returns VBox strict status code.
2039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2040 * @param uCpl The CPL to load the stack for.
2041 * @param uIst The interrupt stack table index; 0 to select the stack by uCpl instead.
2042 * @param puRsp Where to return the new stack pointer.
2043 */
2044static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2045{
2046 Assert(uCpl < 4);
2047 Assert(uIst < 8);
2048 *puRsp = 0; /* make gcc happy */
2049
2050 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2051 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2052
2053 uint32_t off;
2054 if (uIst)
2055 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2056 else
2057 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2058 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2059 {
2060 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2061 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2062 }
2063
2064 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2065}
2066
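/* Layout note (a sketch): in the 64-bit TSS the RSP0..RSP2 fields start at
 * offset 4 and IST1..IST7 at offset 0x24, each 8 bytes wide, which is what the
 * RT_UOFFSETOF() arithmetic above relies on.  With uIst != 0 the IST slot is
 * used and uCpl is ignored.  The compile-time checks below merely restate the
 * two anchor offsets assumed here. */
#if 0 /* documentation sketch only, never compiled */
AssertCompile(RT_UOFFSETOF(X86TSS64, rsp0) == 4);
AssertCompile(RT_UOFFSETOF(X86TSS64, ist1) == 0x24);
#endif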
2067
2068/**
2069 * Adjust the CPU state according to the exception being raised.
2070 *
2071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2072 * @param u8Vector The exception that has been raised.
2073 */
2074DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2075{
2076 switch (u8Vector)
2077 {
2078 case X86_XCPT_DB:
2079 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2080 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2081 break;
2082 /** @todo Read the AMD and Intel exception reference... */
2083 }
2084}
2085
2086
2087/**
2088 * Implements exceptions and interrupts for real mode.
2089 *
2090 * @returns VBox strict status code.
2091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2092 * @param cbInstr The number of bytes to offset rIP by in the return
2093 * address.
2094 * @param u8Vector The interrupt / exception vector number.
2095 * @param fFlags The flags.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 */
2099static VBOXSTRICTRC
2100iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2101 uint8_t cbInstr,
2102 uint8_t u8Vector,
2103 uint32_t fFlags,
2104 uint16_t uErr,
2105 uint64_t uCr2) RT_NOEXCEPT
2106{
2107 NOREF(uErr); NOREF(uCr2);
2108 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2109
2110 /*
2111 * Read the IDT entry.
2112 */
2113 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2114 {
2115 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2116 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2117 }
2118 RTFAR16 Idte;
2119 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2120 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2121 {
2122 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2123 return rcStrict;
2124 }
2125
2126#ifdef LOG_ENABLED
2127 /* If software interrupt, try decode it if logging is enabled and such. */
2128 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2129 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2130 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2131#endif
2132
2133 /*
2134 * Push the stack frame.
2135 */
2136 uint8_t bUnmapInfo;
2137 uint16_t *pu16Frame;
2138 uint64_t uNewRsp;
2139 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2140 if (rcStrict != VINF_SUCCESS)
2141 return rcStrict;
2142
2143 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2144#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2145 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2146 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2147 fEfl |= UINT16_C(0xf000);
2148#endif
2149 pu16Frame[2] = (uint16_t)fEfl;
2150 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2151 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2152 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2153 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2154 return rcStrict;
2155
2156 /*
2157 * Load the vector address into cs:ip and make exception specific state
2158 * adjustments.
2159 */
2160 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2161 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2162 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2163 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2164 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2165 pVCpu->cpum.GstCtx.rip = Idte.off;
2166 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2167 IEMMISC_SET_EFL(pVCpu, fEfl);
2168
2169 /** @todo do we actually do this in real mode? */
2170 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2171 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2172
2173 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2174 so best leave them alone in case we're in a weird kind of real mode... */
2175
2176 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2177}
2178
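/* Frame note (mirroring the code above): real-mode event delivery reads a
 * 4-byte far pointer (offset:segment) from the IVT at IDTR.base + vector * 4
 * and pushes a 6-byte frame on the stack:
 *      new SP + 0 : return IP  (pu16Frame[0])
 *      new SP + 2 : return CS  (pu16Frame[1])
 *      new SP + 4 : FLAGS      (pu16Frame[2])
 * before loading CS:IP from the IVT entry and clearing IF, TF and AC. */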
2179
2180/**
2181 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2182 *
2183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2184 * @param pSReg Pointer to the segment register.
2185 */
2186DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2187{
2188 pSReg->Sel = 0;
2189 pSReg->ValidSel = 0;
2190 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2191 {
2192 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2193 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2194 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2195 }
2196 else
2197 {
2198 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2199 /** @todo check this on AMD-V */
2200 pSReg->u64Base = 0;
2201 pSReg->u32Limit = 0;
2202 }
2203}
2204
2205
2206/**
2207 * Loads a segment selector during a task switch in V8086 mode.
2208 *
2209 * @param pSReg Pointer to the segment register.
2210 * @param uSel The selector value to load.
2211 */
2212DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2213{
2214 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2215 pSReg->Sel = uSel;
2216 pSReg->ValidSel = uSel;
2217 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2218 pSReg->u64Base = uSel << 4;
2219 pSReg->u32Limit = 0xffff;
2220 pSReg->Attr.u = 0xf3;
2221}
2222
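/* Note: in V8086 mode a selector is simply a real-mode style paragraph number,
 * so linear = (uSel << 4) + offset within a 64KiB limit, which is exactly what
 * the hidden base and limit above encode.  Attr 0xf3 is a present, DPL 3,
 * accessed read/write data segment, i.e. the attribute value the checks in the
 * Intel spec. section cited above expect for V86 segments. */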
2223
2224/**
2225 * Loads a segment selector during a task switch in protected mode.
2226 *
2227 * In this task switch scenario, we would throw \#TS exceptions rather than
2228 * \#GPs.
2229 *
2230 * @returns VBox strict status code.
2231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2232 * @param pSReg Pointer to the segment register.
2233 * @param uSel The new selector value.
2234 *
2235 * @remarks This does _not_ handle CS or SS.
2236 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2237 */
2238static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2239{
2240 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2241
2242 /* Null data selector. */
2243 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2244 {
2245 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2246 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2247 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2248 return VINF_SUCCESS;
2249 }
2250
2251 /* Fetch the descriptor. */
2252 IEMSELDESC Desc;
2253 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2254 if (rcStrict != VINF_SUCCESS)
2255 {
2256 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2257 VBOXSTRICTRC_VAL(rcStrict)));
2258 return rcStrict;
2259 }
2260
2261 /* Must be a data segment or readable code segment. */
2262 if ( !Desc.Legacy.Gen.u1DescType
2263 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2264 {
2265 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2266 Desc.Legacy.Gen.u4Type));
2267 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2268 }
2269
2270 /* Check privileges for data segments and non-conforming code segments. */
2271 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2272 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2273 {
2274 /* The RPL and the new CPL must be less than or equal to the DPL. */
2275 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2276 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2277 {
2278 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2279 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2280 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282 }
2283
2284 /* Is it there? */
2285 if (!Desc.Legacy.Gen.u1Present)
2286 {
2287 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2288 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2289 }
2290
2291 /* The base and limit. */
2292 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2293 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2294
2295 /*
2296 * Ok, everything checked out fine. Now set the accessed bit before
2297 * committing the result into the registers.
2298 */
2299 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2300 {
2301 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2302 if (rcStrict != VINF_SUCCESS)
2303 return rcStrict;
2304 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2305 }
2306
2307 /* Commit */
2308 pSReg->Sel = uSel;
2309 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2310 pSReg->u32Limit = cbLimit;
2311 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2312 pSReg->ValidSel = uSel;
2313 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2314 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2315 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2316
2317 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2318 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2319 return VINF_SUCCESS;
2320}
2321
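/* Rule of thumb (restating the loader above): for data and non-conforming code
 * segments both the selector RPL and the current CPL must be <= the descriptor
 * DPL; conforming code segments skip that check.  All failures surface as #TS
 * rather than #GP because this runs in the context of a task switch. */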
2322
2323/**
2324 * Performs a task switch.
2325 *
2326 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2327 * caller is responsible for performing the necessary checks (like DPL, TSS
2328 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2329 * reference for JMP, CALL, IRET.
2330 *
2331 * If the task switch is due to a software interrupt or hardware exception,
2332 * the caller is responsible for validating the TSS selector and descriptor. See
2333 * Intel Instruction reference for INT n.
2334 *
2335 * @returns VBox strict status code.
2336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2337 * @param enmTaskSwitch The cause of the task switch.
2338 * @param uNextEip The EIP effective after the task switch.
2339 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2340 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2341 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2342 * @param SelTss The TSS selector of the new task.
2343 * @param pNewDescTss Pointer to the new TSS descriptor.
2344 */
2345VBOXSTRICTRC
2346iemTaskSwitch(PVMCPUCC pVCpu,
2347 IEMTASKSWITCH enmTaskSwitch,
2348 uint32_t uNextEip,
2349 uint32_t fFlags,
2350 uint16_t uErr,
2351 uint64_t uCr2,
2352 RTSEL SelTss,
2353 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2354{
2355 Assert(!IEM_IS_REAL_MODE(pVCpu));
2356 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2357 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2358
2359 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2360 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2361 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2362 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2363 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2364
2365 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2366 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2367
2368 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2369 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2370
2371 /* Update CR2 in case it's a page-fault. */
2372 /** @todo This should probably be done much earlier in IEM/PGM. See
2373 * @bugref{5653#c49}. */
2374 if (fFlags & IEM_XCPT_FLAGS_CR2)
2375 pVCpu->cpum.GstCtx.cr2 = uCr2;
2376
2377 /*
2378 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2379 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2380 */
2381 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2382 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2383 if (uNewTssLimit < uNewTssLimitMin)
2384 {
2385 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2386 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2387 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2388 }
2389
2390 /*
2391 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2392 * The new TSS must have been read and validated (DPL, limits etc.) before a
2393 * task-switch VM-exit commences.
2394 *
2395 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2396 */
2397 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2398 {
2399 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2400 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2401 }
2402
2403 /*
2404 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2405 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2406 */
2407 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2408 {
2409 uint64_t const uExitInfo1 = SelTss;
2410 uint64_t uExitInfo2 = uErr;
2411 switch (enmTaskSwitch)
2412 {
2413 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2414 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2415 default: break;
2416 }
2417 if (fFlags & IEM_XCPT_FLAGS_ERR)
2418 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2419 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2420 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2421
2422 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2423 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2424 RT_NOREF2(uExitInfo1, uExitInfo2);
2425 }
2426
2427 /*
2428 * Check the current TSS limit. The last written byte to the current TSS during the
2429 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2430 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2431 *
2432 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2433 * end up with smaller than "legal" TSS limits.
2434 */
2435 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2436 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2437 if (uCurTssLimit < uCurTssLimitMin)
2438 {
2439 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2440 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2441 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2442 }
2443
2444 /*
2445 * Verify that the new TSS can be accessed and map it. Map only the required contents
2446 * and not the entire TSS.
2447 */
2448 uint8_t bUnmapInfoNewTss;
2449 void *pvNewTss;
2450 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2451 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2452 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2453 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2454 * not perform correct translation if this happens. See Intel spec. 7.2.1
2455 * "Task-State Segment". */
2456 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2457/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2458 * Consider wrapping the remainder into a function for simpler cleanup. */
2459 if (rcStrict != VINF_SUCCESS)
2460 {
2461 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2462 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2463 return rcStrict;
2464 }
2465
2466 /*
2467 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2468 */
2469 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2470 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2471 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2472 {
2473 uint8_t bUnmapInfoDescCurTss;
2474 PX86DESC pDescCurTss;
2475 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2476 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2477 if (rcStrict != VINF_SUCCESS)
2478 {
2479 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2480 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2481 return rcStrict;
2482 }
2483
2484 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2485 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2486 if (rcStrict != VINF_SUCCESS)
2487 {
2488 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2489 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2490 return rcStrict;
2491 }
2492
2493 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2494 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2495 {
2496 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2497 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2498 fEFlags &= ~X86_EFL_NT;
2499 }
2500 }
2501
2502 /*
2503 * Save the CPU state into the current TSS.
2504 */
2505 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2506 if (GCPtrNewTss == GCPtrCurTss)
2507 {
2508 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2509 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2510 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2511 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2512 pVCpu->cpum.GstCtx.ldtr.Sel));
2513 }
2514 if (fIsNewTss386)
2515 {
2516 /*
2517 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2518 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2519 */
2520 uint8_t bUnmapInfoCurTss32;
2521 void *pvCurTss32;
2522 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2523 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2524 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2525 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2526 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2527 if (rcStrict != VINF_SUCCESS)
2528 {
2529 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2530 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2531 return rcStrict;
2532 }
2533
2534 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2535 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2536 pCurTss32->eip = uNextEip;
2537 pCurTss32->eflags = fEFlags;
2538 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2539 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2540 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2541 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2542 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2543 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2544 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2545 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2546 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2547 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2548 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2549 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2550 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2551 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2552
2553 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2554 if (rcStrict != VINF_SUCCESS)
2555 {
2556 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2557 VBOXSTRICTRC_VAL(rcStrict)));
2558 return rcStrict;
2559 }
2560 }
2561 else
2562 {
2563 /*
2564 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2565 */
2566 uint8_t bUnmapInfoCurTss16;
2567 void *pvCurTss16;
2568 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2569 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2570 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2571 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2572 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2573 if (rcStrict != VINF_SUCCESS)
2574 {
2575 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2576 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2577 return rcStrict;
2578 }
2579
2580 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2581 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2582 pCurTss16->ip = uNextEip;
2583 pCurTss16->flags = (uint16_t)fEFlags;
2584 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2585 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2586 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2587 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2588 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2589 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2590 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2591 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2592 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2593 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2594 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2595 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2596
2597 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2598 if (rcStrict != VINF_SUCCESS)
2599 {
2600 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2601 VBOXSTRICTRC_VAL(rcStrict)));
2602 return rcStrict;
2603 }
2604 }
2605
2606 /*
2607 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2608 */
2609 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2610 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2611 {
2612 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2613 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2614 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2615 }
2616
2617 /*
2618 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2619 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2620 */
2621 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2622 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2623 bool fNewDebugTrap;
2624 if (fIsNewTss386)
2625 {
2626 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2627 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2628 uNewEip = pNewTss32->eip;
2629 uNewEflags = pNewTss32->eflags;
2630 uNewEax = pNewTss32->eax;
2631 uNewEcx = pNewTss32->ecx;
2632 uNewEdx = pNewTss32->edx;
2633 uNewEbx = pNewTss32->ebx;
2634 uNewEsp = pNewTss32->esp;
2635 uNewEbp = pNewTss32->ebp;
2636 uNewEsi = pNewTss32->esi;
2637 uNewEdi = pNewTss32->edi;
2638 uNewES = pNewTss32->es;
2639 uNewCS = pNewTss32->cs;
2640 uNewSS = pNewTss32->ss;
2641 uNewDS = pNewTss32->ds;
2642 uNewFS = pNewTss32->fs;
2643 uNewGS = pNewTss32->gs;
2644 uNewLdt = pNewTss32->selLdt;
2645 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2646 }
2647 else
2648 {
2649 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2650 uNewCr3 = 0;
2651 uNewEip = pNewTss16->ip;
2652 uNewEflags = pNewTss16->flags;
2653 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2654 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2655 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2656 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2657 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2658 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2659 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2660 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2661 uNewES = pNewTss16->es;
2662 uNewCS = pNewTss16->cs;
2663 uNewSS = pNewTss16->ss;
2664 uNewDS = pNewTss16->ds;
2665 uNewFS = 0;
2666 uNewGS = 0;
2667 uNewLdt = pNewTss16->selLdt;
2668 fNewDebugTrap = false;
2669 }
2670
2671 if (GCPtrNewTss == GCPtrCurTss)
2672 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2673 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2674
2675 /*
2676 * We're done accessing the new TSS.
2677 */
2678 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2679 if (rcStrict != VINF_SUCCESS)
2680 {
2681 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2682 return rcStrict;
2683 }
2684
2685 /*
2686 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2687 */
2688 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2689 {
2690 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2691 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2692 if (rcStrict != VINF_SUCCESS)
2693 {
2694 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2695 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2696 return rcStrict;
2697 }
2698
2699 /* Check that the descriptor indicates the new TSS is available (not busy). */
2700 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2701 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2702 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2703
2704 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2705 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2706 if (rcStrict != VINF_SUCCESS)
2707 {
2708 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2709 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2710 return rcStrict;
2711 }
2712 }
2713
2714 /*
2715 * From this point on, we're technically in the new task. Any exception raised from here on
2716 * is delivered after the task switch completes but before the first instruction of the new task executes.
2717 */
2718 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2719 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2720 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2721 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2722 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2723 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2724 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2725
2726 /* Set the busy bit in TR. */
2727 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2728
2729 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2730 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2731 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2732 {
2733 uNewEflags |= X86_EFL_NT;
2734 }
2735
2736 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2737 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2738 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2739
2740 pVCpu->cpum.GstCtx.eip = uNewEip;
2741 pVCpu->cpum.GstCtx.eax = uNewEax;
2742 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2743 pVCpu->cpum.GstCtx.edx = uNewEdx;
2744 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2745 pVCpu->cpum.GstCtx.esp = uNewEsp;
2746 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2747 pVCpu->cpum.GstCtx.esi = uNewEsi;
2748 pVCpu->cpum.GstCtx.edi = uNewEdi;
2749
2750 uNewEflags &= X86_EFL_LIVE_MASK;
2751 uNewEflags |= X86_EFL_RA1_MASK;
2752 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2753
2754 /*
2755 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2756 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2757 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2758 */
2759 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2760 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2761
2762 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2763 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2764
2765 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2766 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2767
2768 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2769 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2770
2771 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2772 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2773
2774 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2775 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2776 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2777
2778 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2779 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2780 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2781 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2782
2783 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2784 {
2785 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2786 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2787 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2788 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2789 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2790 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2791 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2792 }
2793
2794 /*
2795 * Switch CR3 for the new task.
2796 */
2797 if ( fIsNewTss386
2798 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2799 {
2800 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2801 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2802 AssertRCSuccessReturn(rc, rc);
2803
2804 /* Inform PGM. */
2805 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2806 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2807 AssertRCReturn(rc, rc);
2808 /* ignore informational status codes */
2809
2810 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2811 }
2812
2813 /*
2814 * Switch LDTR for the new task.
2815 */
2816 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2817 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2818 else
2819 {
2820 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2821
2822 IEMSELDESC DescNewLdt;
2823 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2824 if (rcStrict != VINF_SUCCESS)
2825 {
2826 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2827 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2828 return rcStrict;
2829 }
2830 if ( !DescNewLdt.Legacy.Gen.u1Present
2831 || DescNewLdt.Legacy.Gen.u1DescType
2832 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2833 {
2834 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2835 uNewLdt, DescNewLdt.Legacy.u));
2836 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2837 }
2838
2839 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2840 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2841 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2842 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2843 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2844 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2845 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2847 }
2848
2849 IEMSELDESC DescSS;
2850 if (IEM_IS_V86_MODE(pVCpu))
2851 {
2852 IEM_SET_CPL(pVCpu, 3);
2853 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2854 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2855 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2856 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2857 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2858 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2859
2860 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2861 DescSS.Legacy.u = 0;
2862 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2863 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2864 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2865 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2866 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2867 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2868 DescSS.Legacy.Gen.u2Dpl = 3;
2869 }
2870 else
2871 {
2872 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2873
2874 /*
2875 * Load the stack segment for the new task.
2876 */
2877 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2878 {
2879 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2880 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2881 }
2882
2883 /* Fetch the descriptor. */
2884 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2885 if (rcStrict != VINF_SUCCESS)
2886 {
2887 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2888 VBOXSTRICTRC_VAL(rcStrict)));
2889 return rcStrict;
2890 }
2891
2892 /* SS must be a data segment and writable. */
2893 if ( !DescSS.Legacy.Gen.u1DescType
2894 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2895 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2896 {
2897 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2898 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2899 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2900 }
2901
2902 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2903 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2904 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2905 {
2906 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2907 uNewCpl));
2908 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2909 }
2910
2911 /* Is it there? */
2912 if (!DescSS.Legacy.Gen.u1Present)
2913 {
2914 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2915 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2916 }
2917
2918 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2919 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2920
2921 /* Set the accessed bit before committing the result into SS. */
2922 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2923 {
2924 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2925 if (rcStrict != VINF_SUCCESS)
2926 return rcStrict;
2927 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2928 }
2929
2930 /* Commit SS. */
2931 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2932 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2933 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2934 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2935 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2936 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2937 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2938
2939 /* CPL has changed, update IEM before loading rest of segments. */
2940 IEM_SET_CPL(pVCpu, uNewCpl);
2941
2942 /*
2943 * Load the data segments for the new task.
2944 */
2945 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2946 if (rcStrict != VINF_SUCCESS)
2947 return rcStrict;
2948 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2949 if (rcStrict != VINF_SUCCESS)
2950 return rcStrict;
2951 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2952 if (rcStrict != VINF_SUCCESS)
2953 return rcStrict;
2954 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2955 if (rcStrict != VINF_SUCCESS)
2956 return rcStrict;
2957
2958 /*
2959 * Load the code segment for the new task.
2960 */
2961 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2962 {
2963 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2964 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2965 }
2966
2967 /* Fetch the descriptor. */
2968 IEMSELDESC DescCS;
2969 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2970 if (rcStrict != VINF_SUCCESS)
2971 {
2972 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2973 return rcStrict;
2974 }
2975
2976 /* CS must be a code segment. */
2977 if ( !DescCS.Legacy.Gen.u1DescType
2978 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2979 {
2980 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2981 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2982 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2983 }
2984
2985 /* For conforming CS, DPL must be less than or equal to the RPL. */
2986 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2987 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2988 {
2989 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2990 DescCS.Legacy.Gen.u2Dpl));
2991 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2992 }
2993
2994 /* For non-conforming CS, DPL must match RPL. */
2995 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2996 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2997 {
2998 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2999 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3000 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3001 }
3002
3003 /* Is it there? */
3004 if (!DescCS.Legacy.Gen.u1Present)
3005 {
3006 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3007 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3008 }
3009
3010 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3011 u64Base = X86DESC_BASE(&DescCS.Legacy);
3012
3013 /* Set the accessed bit before committing the result into CS. */
3014 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3015 {
3016 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3017 if (rcStrict != VINF_SUCCESS)
3018 return rcStrict;
3019 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3020 }
3021
3022 /* Commit CS. */
3023 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3024 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3025 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3026 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3027 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3028 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3029 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3030 }
3031
3032 /* Make sure the CPU mode is correct. */
3033 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3034 if (fExecNew != pVCpu->iem.s.fExec)
3035 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3036 pVCpu->iem.s.fExec = fExecNew;
3037
3038 /** @todo Debug trap. */
3039 if (fIsNewTss386 && fNewDebugTrap)
3040 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3041
3042 /*
3043 * Construct the error code masks based on what caused this task switch.
3044 * See Intel Instruction reference for INT.
3045 */
3046 uint16_t uExt;
3047 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3048 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3049 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3050 uExt = 1;
3051 else
3052 uExt = 0;
3053
3054 /*
3055 * Push any error code on to the new stack.
3056 */
3057 if (fFlags & IEM_XCPT_FLAGS_ERR)
3058 {
3059 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3060 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3061 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
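        /* The error code is pushed as a dword on a 32-bit TSS stack and as a word on a 16-bit one. */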
3062
3063 /* Check that there is sufficient space on the stack. */
3064 /** @todo Factor out segment limit checking for normal/expand down segments
3065 * into a separate function. */
3066 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3067 {
3068 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3069 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3070 {
3071 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3072 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3073 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3074 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3075 }
3076 }
3077 else
3078 {
3079 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3080 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3081 {
3082 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3083 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3084 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3085 }
3086 }
3087
3088
3089 if (fIsNewTss386)
3090 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3091 else
3092 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3093 if (rcStrict != VINF_SUCCESS)
3094 {
3095 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3096 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3097 return rcStrict;
3098 }
3099 }
3100
3101 /* Check the new EIP against the new CS limit. */
3102 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3103 {
3104         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3105 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3106 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3107 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3108 }
3109
3110 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3111 pVCpu->cpum.GstCtx.ss.Sel));
3112 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3113}
3114
3115
3116/**
3117 * Implements exceptions and interrupts for protected mode.
3118 *
3119 * @returns VBox strict status code.
3120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3121 * @param cbInstr The number of bytes to offset rIP by in the return
3122 * address.
3123 * @param u8Vector The interrupt / exception vector number.
3124 * @param fFlags The flags.
3125 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3126 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3127 */
3128static VBOXSTRICTRC
3129iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3130 uint8_t cbInstr,
3131 uint8_t u8Vector,
3132 uint32_t fFlags,
3133 uint16_t uErr,
3134 uint64_t uCr2) RT_NOEXCEPT
3135{
3136 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3137
3138 /*
3139 * Read the IDT entry.
3140 */
3141 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3142 {
3143 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3144 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3145 }
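    /* Protected-mode gate descriptors are 8 bytes each; both the limit check above and the
       descriptor fetch below index the IDT by 8 * u8Vector. */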
3146 X86DESC Idte;
3147 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3148 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3149 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3150 {
3151 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3152 return rcStrict;
3153 }
3154 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3155 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3156 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3157 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3158
3159 /*
3160 * Check the descriptor type, DPL and such.
3161 * ASSUMES this is done in the same order as described for call-gate calls.
3162 */
3163 if (Idte.Gate.u1DescType)
3164 {
3165 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3166 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3167 }
3168 bool fTaskGate = false;
3169 uint8_t f32BitGate = true;
3170 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
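    /* Interrupt gates additionally clear IF on entry (added in the switch below); trap gates leave IF untouched. */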
3171 switch (Idte.Gate.u4Type)
3172 {
3173 case X86_SEL_TYPE_SYS_UNDEFINED:
3174 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3175 case X86_SEL_TYPE_SYS_LDT:
3176 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3177 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3178 case X86_SEL_TYPE_SYS_UNDEFINED2:
3179 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3180 case X86_SEL_TYPE_SYS_UNDEFINED3:
3181 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3182 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3183 case X86_SEL_TYPE_SYS_UNDEFINED4:
3184 {
3185 /** @todo check what actually happens when the type is wrong...
3186 * esp. call gates. */
3187 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3188 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3189 }
3190
3191 case X86_SEL_TYPE_SYS_286_INT_GATE:
3192 f32BitGate = false;
3193 RT_FALL_THRU();
3194 case X86_SEL_TYPE_SYS_386_INT_GATE:
3195 fEflToClear |= X86_EFL_IF;
3196 break;
3197
3198 case X86_SEL_TYPE_SYS_TASK_GATE:
3199 fTaskGate = true;
3200#ifndef IEM_IMPLEMENTS_TASKSWITCH
3201 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3202#endif
3203 break;
3204
3205 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3206 f32BitGate = false;
3207 break;
3208 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3209 break;
3210
3211 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3212 }
3213
3214 /* Check DPL against CPL if applicable. */
3215 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3216 {
3217 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3220 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222 }
3223
3224 /* Is it there? */
3225 if (!Idte.Gate.u1Present)
3226 {
3227 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3228 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3229 }
3230
3231 /* Is it a task-gate? */
3232 if (fTaskGate)
3233 {
3234 /*
3235 * Construct the error code masks based on what caused this task switch.
3236 * See Intel Instruction reference for INT.
3237 */
3238 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3239 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3240 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3241 RTSEL SelTss = Idte.Gate.u16Sel;
3242
3243 /*
3244 * Fetch the TSS descriptor in the GDT.
3245 */
3246 IEMSELDESC DescTSS;
3247 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3248 if (rcStrict != VINF_SUCCESS)
3249 {
3250 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3251 VBOXSTRICTRC_VAL(rcStrict)));
3252 return rcStrict;
3253 }
3254
3255 /* The TSS descriptor must be a system segment and be available (not busy). */
3256 if ( DescTSS.Legacy.Gen.u1DescType
3257 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3258 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3261 u8Vector, SelTss, DescTSS.Legacy.au64));
3262 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3263 }
3264
3265 /* The TSS must be present. */
3266 if (!DescTSS.Legacy.Gen.u1Present)
3267 {
3268 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3269 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3270 }
3271
3272 /* Do the actual task switch. */
3273 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3274 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3275 fFlags, uErr, uCr2, SelTss, &DescTSS);
3276 }
3277
3278 /* A null CS is bad. */
3279 RTSEL NewCS = Idte.Gate.u16Sel;
3280 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3281 {
3282 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3283 return iemRaiseGeneralProtectionFault0(pVCpu);
3284 }
3285
3286 /* Fetch the descriptor for the new CS. */
3287 IEMSELDESC DescCS;
3288 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3289 if (rcStrict != VINF_SUCCESS)
3290 {
3291 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3292 return rcStrict;
3293 }
3294
3295 /* Must be a code segment. */
3296 if (!DescCS.Legacy.Gen.u1DescType)
3297 {
3298 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3299 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3300 }
3301 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3304 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3305 }
3306
3307 /* Don't allow lowering the privilege level. */
3308 /** @todo Does the lowering of privileges apply to software interrupts
3309      * only? This has a bearing on the more-privileged or
3310 * same-privilege stack behavior further down. A testcase would
3311 * be nice. */
3312 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3315 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3316 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3317 }
3318
3319 /* Make sure the selector is present. */
3320 if (!DescCS.Legacy.Gen.u1Present)
3321 {
3322 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3323 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3324 }
3325
3326#ifdef LOG_ENABLED
3327 /* If software interrupt, try decode it if logging is enabled and such. */
3328 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3329 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3330 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3331#endif
3332
3333 /* Check the new EIP against the new CS limit. */
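    /* 286 interrupt/trap gates only carry a 16-bit offset; 386 gates supply the full 32-bit offset. */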
3334 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3335 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3336 ? Idte.Gate.u16OffsetLow
3337 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3338 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3339 if (uNewEip > cbLimitCS)
3340 {
3341 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3342 u8Vector, uNewEip, cbLimitCS, NewCS));
3343 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3344 }
3345 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3346
3347 /* Calc the flag image to push. */
3348 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3349 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3350 fEfl &= ~X86_EFL_RF;
3351 else
3352 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3353
3354 /* From V8086 mode only go to CPL 0. */
3355 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3356 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3357 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3358 {
3359 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3360 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3361 }
3362
3363 /*
3364 * If the privilege level changes, we need to get a new stack from the TSS.
3365 * This in turns means validating the new SS and ESP...
3366 */
3367 if (uNewCpl != IEM_GET_CPL(pVCpu))
3368 {
3369 RTSEL NewSS;
3370 uint32_t uNewEsp;
3371 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3372 if (rcStrict != VINF_SUCCESS)
3373 return rcStrict;
3374
3375 IEMSELDESC DescSS;
3376 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3377 if (rcStrict != VINF_SUCCESS)
3378 return rcStrict;
3379 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3380 if (!DescSS.Legacy.Gen.u1DefBig)
3381 {
3382 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3383 uNewEsp = (uint16_t)uNewEsp;
3384 }
3385
3386 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3387
3388 /* Check that there is sufficient space for the stack frame. */
3389 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3390 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3391 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3392 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
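        /* Frame layout (ascending addresses): [error code,] EIP, CS, EFLAGS, ESP, SS, plus ES, DS, FS
           and GS for a V8086 interruption.  Each slot is 2 bytes for a 16-bit gate and 4 bytes for a
           32-bit gate, hence the shift by f32BitGate above. */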
3393
3394 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3395 {
3396 if ( uNewEsp - 1 > cbLimitSS
3397 || uNewEsp < cbStackFrame)
3398 {
3399 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3400 u8Vector, NewSS, uNewEsp, cbStackFrame));
3401 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3402 }
3403 }
3404 else
3405 {
3406 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3407 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3408 {
3409 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3410 u8Vector, NewSS, uNewEsp, cbStackFrame));
3411 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3412 }
3413 }
3414
3415 /*
3416 * Start making changes.
3417 */
3418
3419 /* Set the new CPL so that stack accesses use it. */
3420 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3421 IEM_SET_CPL(pVCpu, uNewCpl);
3422
3423 /* Create the stack frame. */
3424 uint8_t bUnmapInfoStackFrame;
3425 RTPTRUNION uStackFrame;
3426 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3427 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3428 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3429 if (rcStrict != VINF_SUCCESS)
3430 return rcStrict;
3431 if (f32BitGate)
3432 {
3433 if (fFlags & IEM_XCPT_FLAGS_ERR)
3434 *uStackFrame.pu32++ = uErr;
3435 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3436 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3437 uStackFrame.pu32[2] = fEfl;
3438 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3439 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3440 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3441 if (fEfl & X86_EFL_VM)
3442 {
3443 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3444 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3445 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3446 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3447 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3448 }
3449 }
3450 else
3451 {
3452 if (fFlags & IEM_XCPT_FLAGS_ERR)
3453 *uStackFrame.pu16++ = uErr;
3454 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3455 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3456 uStackFrame.pu16[2] = fEfl;
3457 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3458 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3459 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3460 if (fEfl & X86_EFL_VM)
3461 {
3462 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3463 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3464 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3465 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3466 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3467 }
3468 }
3469 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3470 if (rcStrict != VINF_SUCCESS)
3471 return rcStrict;
3472
3473 /* Mark the selectors 'accessed' (hope this is the correct time). */
3474         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3475 * after pushing the stack frame? (Write protect the gdt + stack to
3476 * find out.) */
3477 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3478 {
3479 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3483 }
3484
3485 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3486 {
3487 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3488 if (rcStrict != VINF_SUCCESS)
3489 return rcStrict;
3490 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3491 }
3492
3493 /*
3494          * Start committing the register changes (joins with the DPL=CPL branch).
3495 */
3496 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3497 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3498 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3499 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3500 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3501 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3502 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3503 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3504 * SP is loaded).
3505 * Need to check the other combinations too:
3506 * - 16-bit TSS, 32-bit handler
3507 * - 32-bit TSS, 16-bit handler */
3508 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3509 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3510 else
3511 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3512
3513 if (fEfl & X86_EFL_VM)
3514 {
3515 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3516 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3517 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3518 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3519 }
3520 }
3521 /*
3522 * Same privilege, no stack change and smaller stack frame.
3523 */
3524 else
3525 {
3526 uint64_t uNewRsp;
3527 uint8_t bUnmapInfoStackFrame;
3528 RTPTRUNION uStackFrame;
3529 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
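        /* Without a privilege change only [error code,] EIP, CS and EFLAGS are pushed; there is no SS:ESP switch. */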
3530 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3531 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3532 if (rcStrict != VINF_SUCCESS)
3533 return rcStrict;
3534
3535 if (f32BitGate)
3536 {
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu32++ = uErr;
3539 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3540 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3541 uStackFrame.pu32[2] = fEfl;
3542 }
3543 else
3544 {
3545 if (fFlags & IEM_XCPT_FLAGS_ERR)
3546 *uStackFrame.pu16++ = uErr;
3547 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3548 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3549 uStackFrame.pu16[2] = fEfl;
3550 }
3551 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3552 if (rcStrict != VINF_SUCCESS)
3553 return rcStrict;
3554
3555 /* Mark the CS selector as 'accessed'. */
3556 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3557 {
3558 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3559 if (rcStrict != VINF_SUCCESS)
3560 return rcStrict;
3561 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3562 }
3563
3564 /*
3565 * Start committing the register changes (joins with the other branch).
3566 */
3567 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3568 }
3569
3570 /* ... register committing continues. */
3571 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3572 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3573 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3574 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3575 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3576 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3577
3578 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3579 fEfl &= ~fEflToClear;
3580 IEMMISC_SET_EFL(pVCpu, fEfl);
3581
3582 if (fFlags & IEM_XCPT_FLAGS_CR2)
3583 pVCpu->cpum.GstCtx.cr2 = uCr2;
3584
3585 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3586 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3587
3588 /* Make sure the execution flags are correct. */
3589 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3590 if (fExecNew != pVCpu->iem.s.fExec)
3591 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3592 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3593 pVCpu->iem.s.fExec = fExecNew;
3594 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3595
3596 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3597}
3598
3599
3600/**
3601 * Implements exceptions and interrupts for long mode.
3602 *
3603 * @returns VBox strict status code.
3604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3605 * @param cbInstr The number of bytes to offset rIP by in the return
3606 * address.
3607 * @param u8Vector The interrupt / exception vector number.
3608 * @param fFlags The flags.
3609 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3610 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3611 */
3612static VBOXSTRICTRC
3613iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3614 uint8_t cbInstr,
3615 uint8_t u8Vector,
3616 uint32_t fFlags,
3617 uint16_t uErr,
3618 uint64_t uCr2) RT_NOEXCEPT
3619{
3620 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3621
3622 /*
3623 * Read the IDT entry.
3624 */
3625 uint16_t offIdt = (uint16_t)u8Vector << 4;
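    /* Long-mode gate descriptors are 16 bytes, hence the shift by 4 here and the two 8-byte fetches below. */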
3626 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3627 {
3628 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3629 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3630 }
3631 X86DESC64 Idte;
3632#ifdef _MSC_VER /* Shut up silly compiler warning. */
3633 Idte.au64[0] = 0;
3634 Idte.au64[1] = 0;
3635#endif
3636 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3638 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3639 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3640 {
3641 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3642 return rcStrict;
3643 }
3644 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3645 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3646 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3647
3648 /*
3649 * Check the descriptor type, DPL and such.
3650 * ASSUMES this is done in the same order as described for call-gate calls.
3651 */
3652 if (Idte.Gate.u1DescType)
3653 {
3654 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3655 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3656 }
3657 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3658 switch (Idte.Gate.u4Type)
3659 {
3660 case AMD64_SEL_TYPE_SYS_INT_GATE:
3661 fEflToClear |= X86_EFL_IF;
3662 break;
3663 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3664 break;
3665
3666 default:
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3668 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3669 }
3670
3671 /* Check DPL against CPL if applicable. */
3672 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3673 {
3674 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3677 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3678 }
3679 }
3680
3681 /* Is it there? */
3682 if (!Idte.Gate.u1Present)
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3685 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3686 }
3687
3688 /* A null CS is bad. */
3689 RTSEL NewCS = Idte.Gate.u16Sel;
3690 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3691 {
3692 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3693 return iemRaiseGeneralProtectionFault0(pVCpu);
3694 }
3695
3696 /* Fetch the descriptor for the new CS. */
3697 IEMSELDESC DescCS;
3698 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3699 if (rcStrict != VINF_SUCCESS)
3700 {
3701 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3702 return rcStrict;
3703 }
3704
3705 /* Must be a 64-bit code segment. */
3706 if (!DescCS.Long.Gen.u1DescType)
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3709 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3710 }
3711 if ( !DescCS.Long.Gen.u1Long
3712 || DescCS.Long.Gen.u1DefBig
3713 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3714 {
3715 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3716 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3717 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3718 }
3719
3720 /* Don't allow lowering the privilege level. For non-conforming CS
3721 selectors, the CS.DPL sets the privilege level the trap/interrupt
3722 handler runs at. For conforming CS selectors, the CPL remains
3723 unchanged, but the CS.DPL must be <= CPL. */
3724 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3725 * when CPU in Ring-0. Result \#GP? */
3726 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3727 {
3728 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3729 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3730 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3731 }
3732
3733
3734 /* Make sure the selector is present. */
3735 if (!DescCS.Legacy.Gen.u1Present)
3736 {
3737 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3738 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3739 }
3740
3741 /* Check that the new RIP is canonical. */
3742 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3743 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3744 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3745 if (!IEM_IS_CANONICAL(uNewRip))
3746 {
3747 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3748 return iemRaiseGeneralProtectionFault0(pVCpu);
3749 }
3750
3751 /*
3752 * If the privilege level changes or if the IST isn't zero, we need to get
3753 * a new stack from the TSS.
3754 */
3755 uint64_t uNewRsp;
3756 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3757 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3758 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3759 || Idte.Gate.u3IST != 0)
3760 {
3761 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3762 if (rcStrict != VINF_SUCCESS)
3763 return rcStrict;
3764 }
3765 else
3766 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3767 uNewRsp &= ~(uint64_t)0xf;
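    /* 64-bit interrupt delivery always aligns the new RSP on a 16-byte boundary before pushing the frame. */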
3768
3769 /*
3770 * Calc the flag image to push.
3771 */
3772 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3773 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3774 fEfl &= ~X86_EFL_RF;
3775 else
3776 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3777
3778 /*
3779 * Start making changes.
3780 */
3781 /* Set the new CPL so that stack accesses use it. */
3782 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3783 IEM_SET_CPL(pVCpu, uNewCpl);
3784/** @todo Setting CPL this early seems wrong as it would affect any errors we
3785 * raise accessing the stack and (?) GDT/LDT... */
3786
3787 /* Create the stack frame. */
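    /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (five qwords), plus the error code
       when IEM_XCPT_FLAGS_ERR is set. */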
3788 uint8_t bUnmapInfoStackFrame;
3789 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3790 RTPTRUNION uStackFrame;
3791 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3792 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3793 if (rcStrict != VINF_SUCCESS)
3794 return rcStrict;
3795
3796 if (fFlags & IEM_XCPT_FLAGS_ERR)
3797 *uStackFrame.pu64++ = uErr;
3798 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3799 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3800 uStackFrame.pu64[2] = fEfl;
3801 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3802 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3803 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3804 if (rcStrict != VINF_SUCCESS)
3805 return rcStrict;
3806
3807     /* Mark the CS selector 'accessed' (hope this is the correct time). */
3808     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3809 * after pushing the stack frame? (Write protect the gdt + stack to
3810 * find out.) */
3811 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3812 {
3813 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3814 if (rcStrict != VINF_SUCCESS)
3815 return rcStrict;
3816 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3817 }
3818
3819 /*
3820      * Start committing the register changes.
3821 */
3822 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3823 * hidden registers when interrupting 32-bit or 16-bit code! */
3824 if (uNewCpl != uOldCpl)
3825 {
3826 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3827 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3828 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3829 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3830 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3831 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3832 }
3833 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3834 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3835 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3836 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3837 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3838 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3839 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3840 pVCpu->cpum.GstCtx.rip = uNewRip;
3841
3842 fEfl &= ~fEflToClear;
3843 IEMMISC_SET_EFL(pVCpu, fEfl);
3844
3845 if (fFlags & IEM_XCPT_FLAGS_CR2)
3846 pVCpu->cpum.GstCtx.cr2 = uCr2;
3847
3848 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3849 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3850
3851 iemRecalcExecModeAndCplFlags(pVCpu);
3852
3853 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3854}
3855
3856
3857/**
3858 * Implements exceptions and interrupts.
3859 *
3860 * All exceptions and interrupts go through this function!
3861 *
3862 * @returns VBox strict status code.
3863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3864 * @param cbInstr The number of bytes to offset rIP by in the return
3865 * address.
3866 * @param u8Vector The interrupt / exception vector number.
3867 * @param fFlags The flags.
3868 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3869 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3870 */
3871VBOXSTRICTRC
3872iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3873 uint8_t cbInstr,
3874 uint8_t u8Vector,
3875 uint32_t fFlags,
3876 uint16_t uErr,
3877 uint64_t uCr2) RT_NOEXCEPT
3878{
3879 /*
3880 * Get all the state that we might need here.
3881 */
3882 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3883 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3884
3885#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3886 /*
3887 * Flush prefetch buffer
3888 */
3889 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3890#endif
3891
3892 /*
3893 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3894 */
3895 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3896 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3897 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3898 | IEM_XCPT_FLAGS_BP_INSTR
3899 | IEM_XCPT_FLAGS_ICEBP_INSTR
3900 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3901 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3902 {
3903 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3904 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3905 u8Vector = X86_XCPT_GP;
3906 uErr = 0;
3907 }
3908
3909 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3910#ifdef DBGFTRACE_ENABLED
3911 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3912 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3913 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3914#endif
3915
3916 /*
3917 * Check if DBGF wants to intercept the exception.
3918 */
3919 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3920 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3921 { /* likely */ }
3922 else
3923 {
3924 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3925 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3926 if (rcStrict != VINF_SUCCESS)
3927 return rcStrict;
3928 }
3929
3930 /*
3931 * Evaluate whether NMI blocking should be in effect.
3932 * Normally, NMI blocking is in effect whenever we inject an NMI.
3933 */
3934 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3935 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3936
3937#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3938 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3939 {
3940 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3941 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3942 return rcStrict0;
3943
3944 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3945 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3946 {
3947 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3948 fBlockNmi = false;
3949 }
3950 }
3951#endif
3952
3953#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3954 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3955 {
3956 /*
3957 * If the event is being injected as part of VMRUN, it isn't subject to event
3958 * intercepts in the nested-guest. However, secondary exceptions that occur
3959 * during injection of any event -are- subject to exception intercepts.
3960 *
3961 * See AMD spec. 15.20 "Event Injection".
3962 */
3963 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3964 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3965 else
3966 {
3967 /*
3968 * Check and handle if the event being raised is intercepted.
3969 */
3970 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3971 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3972 return rcStrict0;
3973 }
3974 }
3975#endif
3976
3977 /*
3978 * Set NMI blocking if necessary.
3979 */
3980 if (fBlockNmi)
3981 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3982
3983 /*
3984 * Do recursion accounting.
3985 */
3986 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3987 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3988 if (pVCpu->iem.s.cXcptRecursions == 0)
3989 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3990 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3991 else
3992 {
3993 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3994 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3995 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3996
3997 if (pVCpu->iem.s.cXcptRecursions >= 4)
3998 {
3999#ifdef DEBUG_bird
4000 AssertFailed();
4001#endif
4002 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4003 }
4004
4005 /*
4006 * Evaluate the sequence of recurring events.
4007 */
4008 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4009 NULL /* pXcptRaiseInfo */);
4010 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4011 { /* likely */ }
4012 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4013 {
4014 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4015 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4016 u8Vector = X86_XCPT_DF;
4017 uErr = 0;
4018#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4019 /* VMX nested-guest #DF intercept needs to be checked here. */
4020 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4021 {
4022 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4023 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4024 return rcStrict0;
4025 }
4026#endif
4027 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4028 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4029 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4030 }
4031 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4032 {
4033 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4034 return iemInitiateCpuShutdown(pVCpu);
4035 }
4036 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4037 {
4038 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4039 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4040 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4041 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4042 return VERR_EM_GUEST_CPU_HANG;
4043 }
4044 else
4045 {
4046 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4047 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4048 return VERR_IEM_IPE_9;
4049 }
4050
4051 /*
4052          * The 'EXT' bit is set when an exception occurs during delivery of an external
4053          * event (such as an interrupt or earlier exception)[1]. The privileged software
4054          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by the
4055          * software interrupt instructions INT n, INTO and INT3, the 'EXT' bit is not set[3].
4056 *
4057 * [1] - Intel spec. 6.13 "Error Code"
4058 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4059 * [3] - Intel Instruction reference for INT n.
4060 */
4061 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4062 && (fFlags & IEM_XCPT_FLAGS_ERR)
4063 && u8Vector != X86_XCPT_PF
4064 && u8Vector != X86_XCPT_DF)
4065 {
4066 uErr |= X86_TRAP_ERR_EXTERNAL;
4067 }
4068 }
4069
4070 pVCpu->iem.s.cXcptRecursions++;
4071 pVCpu->iem.s.uCurXcpt = u8Vector;
4072 pVCpu->iem.s.fCurXcpt = fFlags;
4073 pVCpu->iem.s.uCurXcptErr = uErr;
4074 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4075
4076 /*
4077 * Extensive logging.
4078 */
4079#if defined(LOG_ENABLED) && defined(IN_RING3)
4080 if (LogIs3Enabled())
4081 {
4082 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4083 char szRegs[4096];
4084 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4085 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4086 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4087 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4088 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4089 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4090 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4091 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4092 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4093 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4094 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4095 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4096 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4097 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4098 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4099 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4100 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4101 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4102 " efer=%016VR{efer}\n"
4103 " pat=%016VR{pat}\n"
4104 " sf_mask=%016VR{sf_mask}\n"
4105 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4106 " lstar=%016VR{lstar}\n"
4107 " star=%016VR{star} cstar=%016VR{cstar}\n"
4108 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4109 );
4110
4111 char szInstr[256];
4112 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4113 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4114 szInstr, sizeof(szInstr), NULL);
4115 Log3(("%s%s\n", szRegs, szInstr));
4116 }
4117#endif /* LOG_ENABLED */
4118
4119 /*
4120 * Stats.
4121 */
4122 uint64_t const uTimestamp = ASMReadTSC();
4123 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4124 {
4125 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4126 EMHistoryAddExit(pVCpu,
4127 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4128 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4129 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4130 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4131 }
4132 else
4133 {
4134 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4135 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4136 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4137 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4138 if (fFlags & IEM_XCPT_FLAGS_ERR)
4139 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4140 if (fFlags & IEM_XCPT_FLAGS_CR2)
4141 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4142 }
4143
4144 /*
4145      * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4146 * to ensure that a stale TLB or paging cache entry will only cause one
4147 * spurious #PF.
4148 */
4149 if ( u8Vector == X86_XCPT_PF
4150 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4151 IEMTlbInvalidatePage(pVCpu, uCr2);
4152
4153 /*
4154 * Call the mode specific worker function.
4155 */
4156 VBOXSTRICTRC rcStrict;
4157 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4158 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4159 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4160 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4161 else
4162 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4163
4164 /* Flush the prefetch buffer. */
4165 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4166
4167 /*
4168 * Unwind.
4169 */
4170 pVCpu->iem.s.cXcptRecursions--;
4171 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4172 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4173 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4174 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4175 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4176 return rcStrict;
4177}
4178
4179#ifdef IEM_WITH_SETJMP
4180/**
4181 * See iemRaiseXcptOrInt. Will not return.
4182 */
4183DECL_NO_RETURN(void)
4184iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4185 uint8_t cbInstr,
4186 uint8_t u8Vector,
4187 uint32_t fFlags,
4188 uint16_t uErr,
4189 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4190{
4191 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4192 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4193}
4194#endif
4195
4196
4197/** \#DE - 00. */
4198VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4199{
4200 if (GCMIsInterceptingXcptDE(pVCpu))
4201 {
4202 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4203 if (rc == VINF_SUCCESS)
4204 {
4205 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4206             return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4207 }
4208 }
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4210}
4211
4212
4213#ifdef IEM_WITH_SETJMP
4214/** \#DE - 00. */
4215DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4216{
4217 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4218}
4219#endif
4220
4221
4222/** \#DB - 01.
4223 * @note This automatically clears DR7.GD. */
4224VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4225{
4226 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4227 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4228 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4229}
4230
4231
4232/** \#BR - 05. */
4233VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4234{
4235 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4236}
4237
4238
4239/** \#UD - 06. */
4240VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4241{
4242 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4243}
4244
4245
4246#ifdef IEM_WITH_SETJMP
4247/** \#UD - 06. */
4248DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4249{
4250 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4251}
4252#endif
4253
4254
4255/** \#NM - 07. */
4256VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4257{
4258 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4259}
4260
4261
4262#ifdef IEM_WITH_SETJMP
4263/** \#NM - 07. */
4264DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4265{
4266 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4267}
4268#endif
4269
4270
4271/** \#TS(err) - 0a. */
4272VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4273{
4274 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4275}
4276
4277
4278/** \#TS(tr) - 0a. */
4279VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4280{
4281 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4282 pVCpu->cpum.GstCtx.tr.Sel, 0);
4283}
4284
4285
4286/** \#TS(0) - 0a. */
4287VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4288{
4289 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4290 0, 0);
4291}
4292
4293
4294/** \#TS(err) - 0a. */
4295VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4296{
4297 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4298 uSel & X86_SEL_MASK_OFF_RPL, 0);
4299}
4300
4301
4302/** \#NP(err) - 0b. */
4303VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4304{
4305 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4306}
4307
4308
4309/** \#NP(sel) - 0b. */
4310VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4311{
4312 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4313 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4314 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4315 uSel & ~X86_SEL_RPL, 0);
4316}
4317
4318
4319/** \#SS(seg) - 0c. */
4320VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4321{
4322 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4323 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4325 uSel & ~X86_SEL_RPL, 0);
4326}
4327
4328
4329/** \#SS(err) - 0c. */
4330VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4331{
4332 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4333 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4334 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4335}
4336
4337
4338/** \#GP(n) - 0d. */
4339VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4340{
4341 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4343}
4344
4345
4346/** \#GP(0) - 0d. */
4347VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4348{
4349 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4350 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4351}
4352
4353#ifdef IEM_WITH_SETJMP
4354/** \#GP(0) - 0d. */
4355DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4356{
4357 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4358 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4359}
4360#endif
4361
4362
4363/** \#GP(sel) - 0d. */
4364VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4365{
4366 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4367 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4368 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4369 Sel & ~X86_SEL_RPL, 0);
4370}
4371
4372
4373/** \#GP(0) - 0d. */
4374VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4375{
4376 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4377 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4378}
4379
4380
4381/** \#GP(sel) - 0d. */
4382VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4383{
4384 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4385 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4386 NOREF(iSegReg); NOREF(fAccess);
4387 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4388 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4389}
4390
4391#ifdef IEM_WITH_SETJMP
4392/** \#GP(sel) - 0d, longjmp. */
4393DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4394{
4395 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4396 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4397 NOREF(iSegReg); NOREF(fAccess);
4398 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4399 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4400}
4401#endif
4402
4403/** \#GP(sel) - 0d. */
4404VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4405{
4406 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4407 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4408 NOREF(Sel);
4409 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4410}
4411
4412#ifdef IEM_WITH_SETJMP
4413/** \#GP(sel) - 0d, longjmp. */
4414DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4415{
4416 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4417 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4418 NOREF(Sel);
4419 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4420}
4421#endif
4422
4423
4424/** \#GP(sel) - 0d. */
4425VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4426{
4427 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4428 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4429 NOREF(iSegReg); NOREF(fAccess);
4430 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4431}
4432
4433#ifdef IEM_WITH_SETJMP
4434/** \#GP(sel) - 0d, longjmp. */
4435DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4436{
4437 NOREF(iSegReg); NOREF(fAccess);
4438 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4439}
4440#endif
4441
4442
4443/** \#PF(n) - 0e. */
4444VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4445{
4446 uint16_t uErr;
4447 switch (rc)
4448 {
4449 case VERR_PAGE_NOT_PRESENT:
4450 case VERR_PAGE_TABLE_NOT_PRESENT:
4451 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4452 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4453 uErr = 0;
4454 break;
4455
4456 default:
4457 AssertMsgFailed(("%Rrc\n", rc));
4458 RT_FALL_THRU();
4459 case VERR_ACCESS_DENIED:
4460 uErr = X86_TRAP_PF_P;
4461 break;
4462
4463 /** @todo reserved */
4464 }
4465
4466 if (IEM_GET_CPL(pVCpu) == 3)
4467 uErr |= X86_TRAP_PF_US;
4468
4469 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4470 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4471 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4472 uErr |= X86_TRAP_PF_ID;
4473
4474#if 0 /* This is so much non-sense, really. Why was it done like that? */
4475 /* Note! RW access callers reporting a WRITE protection fault, will clear
4476 the READ flag before calling. So, read-modify-write accesses (RW)
4477 can safely be reported as READ faults. */
4478 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4479 uErr |= X86_TRAP_PF_RW;
4480#else
4481 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4482 {
4483 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4484 /// (regardless of outcome of the comparison in the latter case).
4485 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4486 uErr |= X86_TRAP_PF_RW;
4487 }
4488#endif
4489
4490 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4491 of the memory operand rather than at the start of it. (Not sure what
4492 happens if it crosses a page boundary.) The current heuristic for
4493 this is to report the #PF for the last byte if the access is more than
4494 64 bytes. This is probably not correct, but we can work that out later;
4495 the main objective now is to get FXSAVE to work like on real hardware and
4496 make bs3-cpu-basic2 work. */
4497 if (cbAccess <= 64)
4498 { /* likely */ }
4499 else
4500 GCPtrWhere += cbAccess - 1;
4501
4502 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4503 uErr, GCPtrWhere);
4504}
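
/*
 * Illustrative sketch (not part of the build): how the #PF error code bits
 * assembled above combine for a few common cases.  The X86_TRAP_PF_* values
 * mirror the architectural error code layout (P=bit 0, W/R=bit 1, U/S=bit 2,
 * I/D=bit 4); the helper function and its name are made up for this sketch.
 */
#if 0 /* example only */
static uint16_t iemExamplePfErrCode(bool fProtViolation, bool fWrite, bool fUser, bool fInstrFetchWithNx)
{
    uint16_t uErr = 0;
    if (fProtViolation)     uErr |= X86_TRAP_PF_P;  /* page present, protection violation */
    if (fWrite)             uErr |= X86_TRAP_PF_RW; /* write access */
    if (fUser)              uErr |= X86_TRAP_PF_US; /* CPL == 3 */
    if (fInstrFetchWithNx)  uErr |= X86_TRAP_PF_ID; /* instruction fetch with NXE+PAE enabled */
    return uErr;
}
/* E.g. a ring-3 write to a present read-only page gives 0x7 (P|RW|US), while a
   ring-0 read of a not-present page gives 0x0. */
#endif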
4505
4506#ifdef IEM_WITH_SETJMP
4507/** \#PF(n) - 0e, longjmp. */
4508DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4509 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4510{
4511 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4512}
4513#endif
4514
4515
4516/** \#MF(0) - 10. */
4517VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4518{
4519 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4520 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4521
4522 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4523 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4524 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4525}
4526
4527#ifdef IEM_WITH_SETJMP
4528/** \#MF(0) - 10, longjmp. */
4529DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4530{
4531 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4532}
4533#endif
4534
4535
4536/** \#AC(0) - 11. */
4537VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4538{
4539 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4540}
4541
4542#ifdef IEM_WITH_SETJMP
4543/** \#AC(0) - 11, longjmp. */
4544DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4545{
4546 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4547}
4548#endif
4549
4550
4551/** \#XF(0)/\#XM(0) - 19. */
4552VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4553{
4554 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4555}
4556
4557
4558#ifdef IEM_WITH_SETJMP
4559/** \#XF(0)/\#XM(0) - 19, longjmp. */
4560DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4561{
4562 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4563}
4564#endif
4565
4566
4567/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4568IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4569{
4570 NOREF(cbInstr);
4571 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4572}
4573
4574
4575/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4576IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4577{
4578 NOREF(cbInstr);
4579 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4580}
4581
4582
4583/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4584IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4585{
4586 NOREF(cbInstr);
4587 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4588}
4589
4590
4591/** @} */
4592
4593/** @name Common opcode decoders.
4594 * @{
4595 */
4596//#include <iprt/mem.h>
4597
4598/**
4599 * Used to add extra details about a stub case.
4600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4601 */
4602void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4603{
4604#if defined(LOG_ENABLED) && defined(IN_RING3)
4605 PVM pVM = pVCpu->CTX_SUFF(pVM);
4606 char szRegs[4096];
4607 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4608 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4609 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4610 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4611 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4612 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4613 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4614 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4615 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4616 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4617 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4618 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4619 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4620 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4621 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4622 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4623 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4624 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4625 " efer=%016VR{efer}\n"
4626 " pat=%016VR{pat}\n"
4627 " sf_mask=%016VR{sf_mask}\n"
4628 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4629 " lstar=%016VR{lstar}\n"
4630 " star=%016VR{star} cstar=%016VR{cstar}\n"
4631 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4632 );
4633
4634 char szInstr[256];
4635 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4636 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4637 szInstr, sizeof(szInstr), NULL);
4638
4639 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4640#else
4641 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4642#endif
4643}
4644
4645/** @} */
4646
4647
4648
4649/** @name Register Access.
4650 * @{
4651 */
4652
4653/**
4654 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4655 *
4656 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4657 * segment limit.
4658 *
 * @returns Strict VBox status code.
4659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4660 * @param cbInstr Instruction size.
4661 * @param offNextInstr The offset of the next instruction.
4662 * @param enmEffOpSize Effective operand size.
4663 */
4664VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4665 IEMMODE enmEffOpSize) RT_NOEXCEPT
4666{
4667 switch (enmEffOpSize)
4668 {
4669 case IEMMODE_16BIT:
4670 {
4671 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4672 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4673 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4674 pVCpu->cpum.GstCtx.rip = uNewIp;
4675 else
4676 return iemRaiseGeneralProtectionFault0(pVCpu);
4677 break;
4678 }
4679
4680 case IEMMODE_32BIT:
4681 {
4682 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4683 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4684
4685 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4686 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4687 pVCpu->cpum.GstCtx.rip = uNewEip;
4688 else
4689 return iemRaiseGeneralProtectionFault0(pVCpu);
4690 break;
4691 }
4692
4693 case IEMMODE_64BIT:
4694 {
4695 Assert(IEM_IS_64BIT_CODE(pVCpu));
4696
4697 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4698 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4699 pVCpu->cpum.GstCtx.rip = uNewRip;
4700 else
4701 return iemRaiseGeneralProtectionFault0(pVCpu);
4702 break;
4703 }
4704
4705 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4706 }
4707
4708#ifndef IEM_WITH_CODE_TLB
4709 /* Flush the prefetch buffer. */
4710 pVCpu->iem.s.cbOpcode = cbInstr;
4711#endif
4712
4713 /*
4714 * Clear RF and finish the instruction (maybe raise #DB).
4715 */
4716 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4717}
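
/*
 * Illustrative sketch (not part of the build): the 16-bit branch above relies
 * on uint16_t arithmetic wrapping modulo 64K before the CS limit check is
 * applied.  The numbers below are made up for the example.
 */
#if 0 /* example only */
static void iemExampleIpWrap(void)
{
    uint16_t const uIp     = UINT16_C(0xfffe);  /* current IP */
    uint8_t  const cbInstr = 2;                 /* a short JMP */
    int8_t   const offRel  = 0x10;              /* rel8 displacement */
    uint16_t const uNewIp  = (uint16_t)(uIp + cbInstr + (int16_t)offRel); /* 0x1_0010 wraps to 0x0010 */
    /* Whether 0x0010 is then accepted depends on cs.u32Limit (no check in 64-bit mode). */
    RT_NOREF(uNewIp);
}
#endif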
4718
4719
4720/**
4721 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4722 *
4723 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4724 * segment limit.
4725 *
4726 * @returns Strict VBox status code.
4727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4728 * @param cbInstr Instruction size.
4729 * @param offNextInstr The offset of the next instruction.
4730 */
4731VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4732{
4733 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4734
4735 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4736 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4737 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4738 pVCpu->cpum.GstCtx.rip = uNewIp;
4739 else
4740 return iemRaiseGeneralProtectionFault0(pVCpu);
4741
4742#ifndef IEM_WITH_CODE_TLB
4743 /* Flush the prefetch buffer. */
4744 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4745#endif
4746
4747 /*
4748 * Clear RF and finish the instruction (maybe raise #DB).
4749 */
4750 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4751}
4752
4753
4754/**
4755 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4756 *
4757 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4758 * segment limit.
4759 *
4760 * @returns Strict VBox status code.
4761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4762 * @param cbInstr Instruction size.
4763 * @param offNextInstr The offset of the next instruction.
4764 * @param enmEffOpSize Effective operand size.
4765 */
4766VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4767 IEMMODE enmEffOpSize) RT_NOEXCEPT
4768{
4769 if (enmEffOpSize == IEMMODE_32BIT)
4770 {
4771 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4772
4773 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4774 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4775 pVCpu->cpum.GstCtx.rip = uNewEip;
4776 else
4777 return iemRaiseGeneralProtectionFault0(pVCpu);
4778 }
4779 else
4780 {
4781 Assert(enmEffOpSize == IEMMODE_64BIT);
4782
4783 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4784 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4785 pVCpu->cpum.GstCtx.rip = uNewRip;
4786 else
4787 return iemRaiseGeneralProtectionFault0(pVCpu);
4788 }
4789
4790#ifndef IEM_WITH_CODE_TLB
4791 /* Flush the prefetch buffer. */
4792 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4793#endif
4794
4795 /*
4796 * Clear RF and finish the instruction (maybe raise #DB).
4797 */
4798 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4799}
4800
4801/** @} */
4802
4803
4804/** @name FPU access and helpers.
4805 *
4806 * @{
4807 */
4808
4809/**
4810 * Updates the x87.DS and FPUDP registers.
4811 *
4812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4813 * @param pFpuCtx The FPU context.
4814 * @param iEffSeg The effective segment register.
4815 * @param GCPtrEff The effective address relative to @a iEffSeg.
4816 */
4817DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4818{
4819 RTSEL sel;
4820 switch (iEffSeg)
4821 {
4822 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4823 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4824 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4825 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4826 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4827 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4828 default:
4829 AssertMsgFailed(("%d\n", iEffSeg));
4830 sel = pVCpu->cpum.GstCtx.ds.Sel;
4831 }
4832 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4833 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4834 {
4835 pFpuCtx->DS = 0;
4836 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4837 }
4838 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4839 {
4840 pFpuCtx->DS = sel;
4841 pFpuCtx->FPUDP = GCPtrEff;
4842 }
4843 else
4844 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4845}
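
/*
 * Illustrative sketch (not part of the build): the real/V86-mode branch above
 * folds the selector into the offset the same way the CPU forms a real-mode
 * linear address.  The helper and its values are made up for the example.
 */
#if 0 /* example only */
static void iemExampleRealModeFpuDp(void)
{
    uint16_t const uSel   = UINT16_C(0x1234);
    uint32_t const offEff = UINT32_C(0x0010);
    uint32_t const uFpuDp = offEff + ((uint32_t)uSel << 4); /* 0x12350 */
    RT_NOREF(uFpuDp);
}
#endif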
4846
4847
4848/**
4849 * Rotates the stack registers in the push direction.
4850 *
4851 * @param pFpuCtx The FPU context.
4852 * @remarks This is a complete waste of time, but fxsave stores the registers in
4853 * stack order.
4854 */
4855DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4856{
4857 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4858 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4859 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4860 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4861 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4862 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4863 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4864 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4865 pFpuCtx->aRegs[0].r80 = r80Tmp;
4866}
4867
4868
4869/**
4870 * Rotates the stack registers in the pop direction.
4871 *
4872 * @param pFpuCtx The FPU context.
4873 * @remarks This is a complete waste of time, but fxsave stores the registers in
4874 * stack order.
4875 */
4876DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4877{
4878 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4879 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4880 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4881 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4882 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4883 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4884 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4885 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4886 pFpuCtx->aRegs[7].r80 = r80Tmp;
4887}
4888
4889
4890/**
4891 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4892 * exception prevents it.
4893 *
4894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4895 * @param pResult The FPU operation result to push.
4896 * @param pFpuCtx The FPU context.
4897 */
4898static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4899{
4900 /* Update FSW and bail if there are pending exceptions afterwards. */
4901 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4902 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4903 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4904 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4905 {
4906 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4907 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4908 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4909 pFpuCtx->FSW = fFsw;
4910 return;
4911 }
4912
4913 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4914 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4915 {
4916 /* All is fine, push the actual value. */
4917 pFpuCtx->FTW |= RT_BIT(iNewTop);
4918 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4919 }
4920 else if (pFpuCtx->FCW & X86_FCW_IM)
4921 {
4922 /* Masked stack overflow, push QNaN. */
4923 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4924 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4925 }
4926 else
4927 {
4928 /* Raise stack overflow, don't push anything. */
4929 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4930 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4931 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4932 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4933 return;
4934 }
4935
4936 fFsw &= ~X86_FSW_TOP_MASK;
4937 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4938 pFpuCtx->FSW = fFsw;
4939
4940 iemFpuRotateStackPush(pFpuCtx);
4941 RT_NOREF(pVCpu);
4942}
4943
4944
4945/**
4946 * Stores a result in a FPU register and updates the FSW and FTW.
4947 *
4948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4949 * @param pFpuCtx The FPU context.
4950 * @param pResult The result to store.
4951 * @param iStReg Which FPU register to store it in.
4952 */
4953static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4954{
4955 Assert(iStReg < 8);
4956 uint16_t fNewFsw = pFpuCtx->FSW;
4957 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4958 fNewFsw &= ~X86_FSW_C_MASK;
4959 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4960 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4961 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4962 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4963 pFpuCtx->FSW = fNewFsw;
4964 pFpuCtx->FTW |= RT_BIT(iReg);
4965 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4966 RT_NOREF(pVCpu);
4967}
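
/*
 * Illustrative sketch (not part of the build): ST(i) is relative to TOP, so the
 * physical register slot written above is (TOP + i) modulo 8.  The helper and
 * the numbers are made up for the example.
 */
#if 0 /* example only */
static void iemExampleStRegToPhysicalSlot(void)
{
    uint16_t const iTop   = 6;                                    /* current TOP     */
    uint8_t  const iStReg = 3;                                    /* ST(3)           */
    uint16_t const iReg   = (iTop + iStReg) & X86_FSW_TOP_SMASK;  /* physical slot 1 */
    RT_NOREF(iReg);
}
#endif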
4968
4969
4970/**
4971 * Only updates the FPU status word (FSW) with the result of the current
4972 * instruction.
4973 *
4974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4975 * @param pFpuCtx The FPU context.
4976 * @param u16FSW The FSW output of the current instruction.
4977 */
4978static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4979{
4980 uint16_t fNewFsw = pFpuCtx->FSW;
4981 fNewFsw &= ~X86_FSW_C_MASK;
4982 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4983 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4984 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4985 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4986 pFpuCtx->FSW = fNewFsw;
4987 RT_NOREF(pVCpu);
4988}
4989
4990
4991/**
4992 * Pops one item off the FPU stack if no pending exception prevents it.
4993 *
4994 * @param pFpuCtx The FPU context.
4995 */
4996static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4997{
4998 /* Check pending exceptions. */
4999 uint16_t uFSW = pFpuCtx->FSW;
5000 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5001 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5002 return;
5003
5004 /* TOP++ (popping increments the 3-bit TOP field, modulo 8). */
5005 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5006 uFSW &= ~X86_FSW_TOP_MASK;
5007 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5008 pFpuCtx->FSW = uFSW;
5009
5010 /* Mark the previous ST0 as empty. */
5011 iOldTop >>= X86_FSW_TOP_SHIFT;
5012 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5013
5014 /* Rotate the registers. */
5015 iemFpuRotateStackPop(pFpuCtx);
5016}
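
/*
 * Illustrative sketch (not part of the build): TOP is a 3-bit field, so the
 * push path above decrements it by adding 7 modulo 8 while the pop path
 * increments it by adding 9 modulo 8.  The helper below is made up to show the
 * arithmetic in isolation.
 */
#if 0 /* example only */
static void iemExampleFpuTopArithmetic(uint16_t fFsw)
{
    uint16_t const iTop     = X86_FSW_TOP_GET(fFsw);           /* current TOP, 0..7 */
    uint16_t const iPushTop = (iTop + 7) & X86_FSW_TOP_SMASK;  /* TOP - 1 (mod 8)   */
    uint16_t const iPopTop  = (iTop + 9) & X86_FSW_TOP_SMASK;  /* TOP + 1 (mod 8)   */
    RT_NOREF(iPushTop, iPopTop);
}
#endif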
5017
5018
5019/**
5020 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5021 *
5022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5023 * @param pResult The FPU operation result to push.
5024 * @param uFpuOpcode The FPU opcode value.
5025 */
5026void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5027{
5028 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5029 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5030 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5031}
5032
5033
5034/**
5035 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5036 * and sets FPUDP and FPUDS.
5037 *
5038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5039 * @param pResult The FPU operation result to push.
5040 * @param iEffSeg The effective segment register.
5041 * @param GCPtrEff The effective address relative to @a iEffSeg.
5042 * @param uFpuOpcode The FPU opcode value.
5043 */
5044void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5045 uint16_t uFpuOpcode) RT_NOEXCEPT
5046{
5047 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5048 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5049 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5050 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5051}
5052
5053
5054/**
5055 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5056 * unless a pending exception prevents it.
5057 *
5058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5059 * @param pResult The FPU operation result to store and push.
5060 * @param uFpuOpcode The FPU opcode value.
5061 */
5062void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5063{
5064 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5065 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5066
5067 /* Update FSW and bail if there are pending exceptions afterwards. */
5068 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5069 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5070 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5071 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5072 {
5073 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5074 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5075 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5076 pFpuCtx->FSW = fFsw;
5077 return;
5078 }
5079
5080 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5081 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5082 {
5083 /* All is fine, push the actual value. */
5084 pFpuCtx->FTW |= RT_BIT(iNewTop);
5085 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5086 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5087 }
5088 else if (pFpuCtx->FCW & X86_FCW_IM)
5089 {
5090 /* Masked stack overflow, push QNaN. */
5091 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5092 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5093 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5094 }
5095 else
5096 {
5097 /* Raise stack overflow, don't push anything. */
5098 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5099 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5100 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5102 return;
5103 }
5104
5105 fFsw &= ~X86_FSW_TOP_MASK;
5106 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5107 pFpuCtx->FSW = fFsw;
5108
5109 iemFpuRotateStackPush(pFpuCtx);
5110}
5111
5112
5113/**
5114 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5115 * FOP.
5116 *
5117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5118 * @param pResult The result to store.
5119 * @param iStReg Which FPU register to store it in.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5127}
5128
5129
5130/**
5131 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5132 * FOP, and then pops the stack.
5133 *
5134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5135 * @param pResult The result to store.
5136 * @param iStReg Which FPU register to store it in.
5137 * @param uFpuOpcode The FPU opcode value.
5138 */
5139void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5140{
5141 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5142 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5143 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5144 iemFpuMaybePopOne(pFpuCtx);
5145}
5146
5147
5148/**
5149 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5150 * FPUDP, and FPUDS.
5151 *
5152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5153 * @param pResult The result to store.
5154 * @param iStReg Which FPU register to store it in.
5155 * @param iEffSeg The effective memory operand selector register.
5156 * @param GCPtrEff The effective memory operand offset.
5157 * @param uFpuOpcode The FPU opcode value.
5158 */
5159void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5160 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5161{
5162 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5163 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5164 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5165 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5166}
5167
5168
5169/**
5170 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5171 * FPUDP, and FPUDS, and then pops the stack.
5172 *
5173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5174 * @param pResult The result to store.
5175 * @param iStReg Which FPU register to store it in.
5176 * @param iEffSeg The effective memory operand selector register.
5177 * @param GCPtrEff The effective memory operand offset.
5178 * @param uFpuOpcode The FPU opcode value.
5179 */
5180void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5181 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5182{
5183 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5184 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5185 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5186 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5187 iemFpuMaybePopOne(pFpuCtx);
5188}
5189
5190
5191/**
5192 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5193 *
5194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5195 * @param uFpuOpcode The FPU opcode value.
5196 */
5197void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5198{
5199 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5200 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5201}
5202
5203
5204/**
5205 * Updates the FSW, FOP, FPUIP, and FPUCS.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param u16FSW The FSW from the current instruction.
5209 * @param uFpuOpcode The FPU opcode value.
5210 */
5211void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5212{
5213 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5214 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5215 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5216}
5217
5218
5219/**
5220 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5221 *
5222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5223 * @param u16FSW The FSW from the current instruction.
5224 * @param uFpuOpcode The FPU opcode value.
5225 */
5226void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5227{
5228 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5229 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5230 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5231 iemFpuMaybePopOne(pFpuCtx);
5232}
5233
5234
5235/**
5236 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5237 *
5238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5239 * @param u16FSW The FSW from the current instruction.
5240 * @param iEffSeg The effective memory operand selector register.
5241 * @param GCPtrEff The effective memory operand offset.
5242 * @param uFpuOpcode The FPU opcode value.
5243 */
5244void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5245{
5246 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5247 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5248 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5249 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5250}
5251
5252
5253/**
5254 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5255 *
5256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5257 * @param u16FSW The FSW from the current instruction.
5258 * @param uFpuOpcode The FPU opcode value.
5259 */
5260void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5261{
5262 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5263 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5264 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5265 iemFpuMaybePopOne(pFpuCtx);
5266 iemFpuMaybePopOne(pFpuCtx);
5267}
5268
5269
5270/**
5271 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5272 *
5273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5274 * @param u16FSW The FSW from the current instruction.
5275 * @param iEffSeg The effective memory operand selector register.
5276 * @param GCPtrEff The effective memory operand offset.
5277 * @param uFpuOpcode The FPU opcode value.
5278 */
5279void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5280 uint16_t uFpuOpcode) RT_NOEXCEPT
5281{
5282 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5283 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5284 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5285 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5286 iemFpuMaybePopOne(pFpuCtx);
5287}
5288
5289
5290/**
5291 * Worker routine for raising an FPU stack underflow exception.
5292 *
5293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5294 * @param pFpuCtx The FPU context.
5295 * @param iStReg The stack register being accessed.
5296 */
5297static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5298{
5299 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5300 if (pFpuCtx->FCW & X86_FCW_IM)
5301 {
5302 /* Masked underflow. */
5303 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5304 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5305 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5306 if (iStReg != UINT8_MAX)
5307 {
5308 pFpuCtx->FTW |= RT_BIT(iReg);
5309 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5310 }
5311 }
5312 else
5313 {
5314 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5315 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5316 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5317 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5318 }
5319 RT_NOREF(pVCpu);
5320}
5321
5322
5323/**
5324 * Raises a FPU stack underflow exception.
5325 *
5326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5327 * @param iStReg The destination register that should be loaded
5328 * with QNaN if \#IS is not masked. Specify
5329 * UINT8_MAX if none (like for fcom).
5330 * @param uFpuOpcode The FPU opcode value.
5331 */
5332void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5333{
5334 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5335 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5336 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5337}
5338
5339
5340void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5341{
5342 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5343 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5344 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5345 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5346}
5347
5348
5349void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5350{
5351 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5352 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5353 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5354 iemFpuMaybePopOne(pFpuCtx);
5355}
5356
5357
5358void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5359 uint16_t uFpuOpcode) RT_NOEXCEPT
5360{
5361 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5362 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5363 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5364 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5365 iemFpuMaybePopOne(pFpuCtx);
5366}
5367
5368
5369void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5370{
5371 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5372 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5373 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5374 iemFpuMaybePopOne(pFpuCtx);
5375 iemFpuMaybePopOne(pFpuCtx);
5376}
5377
5378
5379void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5380{
5381 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5382 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5383
5384 if (pFpuCtx->FCW & X86_FCW_IM)
5385 {
5386 /* Masked underflow - push QNaN. */
5387 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5388 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5389 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5390 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5391 pFpuCtx->FTW |= RT_BIT(iNewTop);
5392 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5393 iemFpuRotateStackPush(pFpuCtx);
5394 }
5395 else
5396 {
5397 /* Exception pending - don't change TOP or the register stack. */
5398 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5399 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5400 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5401 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5402 }
5403}
5404
5405
5406void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5407{
5408 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5409 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5410
5411 if (pFpuCtx->FCW & X86_FCW_IM)
5412 {
5413 /* Masked underflow - push QNaN. */
5414 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5415 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5416 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5417 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5418 pFpuCtx->FTW |= RT_BIT(iNewTop);
5419 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5420 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5421 iemFpuRotateStackPush(pFpuCtx);
5422 }
5423 else
5424 {
5425 /* Exception pending - don't change TOP or the register stack. */
5426 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5427 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5428 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5429 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5430 }
5431}
5432
5433
5434/**
5435 * Worker routine for raising an FPU stack overflow exception on a push.
5436 *
5437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5438 * @param pFpuCtx The FPU context.
5439 */
5440static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5441{
5442 if (pFpuCtx->FCW & X86_FCW_IM)
5443 {
5444 /* Masked overflow. */
5445 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5446 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5447 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5448 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5449 pFpuCtx->FTW |= RT_BIT(iNewTop);
5450 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5451 iemFpuRotateStackPush(pFpuCtx);
5452 }
5453 else
5454 {
5455 /* Exception pending - don't change TOP or the register stack. */
5456 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5457 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5458 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5459 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5460 }
5461 RT_NOREF(pVCpu);
5462}
5463
5464
5465/**
5466 * Raises a FPU stack overflow exception on a push.
5467 *
5468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5469 * @param uFpuOpcode The FPU opcode value.
5470 */
5471void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5472{
5473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5474 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5475 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5476}
5477
5478
5479/**
5480 * Raises a FPU stack overflow exception on a push with a memory operand.
5481 *
5482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5483 * @param iEffSeg The effective memory operand selector register.
5484 * @param GCPtrEff The effective memory operand offset.
5485 * @param uFpuOpcode The FPU opcode value.
5486 */
5487void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5488{
5489 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5490 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5491 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5492 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5493}
5494
5495/** @} */
5496
5497
5498/** @name Memory access.
5499 *
5500 * @{
5501 */
5502
5503#undef LOG_GROUP
5504#define LOG_GROUP LOG_GROUP_IEM_MEM
5505
5506/**
5507 * Updates the IEMCPU::cbWritten counter if applicable.
5508 *
5509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5510 * @param fAccess The access being accounted for.
5511 * @param cbMem The access size.
5512 */
5513DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5514{
5515 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5516 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5517 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5518}
5519
5520
5521/**
5522 * Applies the segment limit, base and attributes.
5523 *
5524 * This may raise a \#GP or \#SS.
5525 *
5526 * @returns VBox strict status code.
5527 *
5528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5529 * @param fAccess The kind of access which is being performed.
5530 * @param iSegReg The index of the segment register to apply.
5531 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5532 * TSS, ++).
5533 * @param cbMem The access size.
5534 * @param pGCPtrMem Pointer to the guest memory address to apply
5535 * segmentation to. Input and output parameter.
5536 */
5537VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5538{
5539 if (iSegReg == UINT8_MAX)
5540 return VINF_SUCCESS;
5541
5542 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5543 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5544 switch (IEM_GET_CPU_MODE(pVCpu))
5545 {
5546 case IEMMODE_16BIT:
5547 case IEMMODE_32BIT:
5548 {
5549 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5550 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5551
5552 if ( pSel->Attr.n.u1Present
5553 && !pSel->Attr.n.u1Unusable)
5554 {
5555 Assert(pSel->Attr.n.u1DescType);
5556 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5557 {
5558 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5559 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5560 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5561
5562 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5563 {
5564 /** @todo CPL check. */
5565 }
5566
5567 /*
5568 * There are two kinds of data selectors, normal and expand down.
5569 */
5570 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5571 {
5572 if ( GCPtrFirst32 > pSel->u32Limit
5573 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5574 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5575 }
5576 else
5577 {
5578 /*
5579 * The upper boundary is defined by the B bit, not the G bit!
5580 */
5581 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5582 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5583 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5584 }
5585 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5586 }
5587 else
5588 {
5589 /*
5590 * Code selectors can usually be used to read through; writing is
5591 * only permitted in real and V8086 mode.
5592 */
5593 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5594 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5595 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5596 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5597 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5598
5599 if ( GCPtrFirst32 > pSel->u32Limit
5600 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5601 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5602
5603 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5604 {
5605 /** @todo CPL check. */
5606 }
5607
5608 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5609 }
5610 }
5611 else
5612 return iemRaiseGeneralProtectionFault0(pVCpu);
5613 return VINF_SUCCESS;
5614 }
5615
5616 case IEMMODE_64BIT:
5617 {
5618 RTGCPTR GCPtrMem = *pGCPtrMem;
5619 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5620 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5621
5622 Assert(cbMem >= 1);
5623 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5624 return VINF_SUCCESS;
5625 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5626 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5627 return iemRaiseGeneralProtectionFault0(pVCpu);
5628 }
5629
5630 default:
5631 AssertFailedReturn(VERR_IEM_IPE_7);
5632 }
5633}
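
/*
 * Illustrative sketch (not part of the build): for the expand-down data segment
 * case handled above, the valid offset range is (limit + 1)..0xffff, or
 * (limit + 1)..0xffffffff when the D/B bit is set.  The helper below is a
 * made-up restatement of that check with plain integers.
 */
#if 0 /* example only */
static bool iemExampleExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBig)
{
    uint32_t const offMax = fBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit    /* must lie strictly above the limit... */
        && offLast  <= offMax;  /* ...and within the D/B-defined upper bound. */
}
#endif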
5634
5635
5636/**
5637 * Translates a virtual address to a physical address and checks if we
5638 * can access the page as specified.
5639 *
5640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5641 * @param GCPtrMem The virtual address.
5642 * @param cbAccess The access size, for raising \#PF correctly for
5643 * FXSAVE and such.
5644 * @param fAccess The intended access.
5645 * @param pGCPhysMem Where to return the physical address.
5646 */
5647VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5648 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5649{
5650 /** @todo Need a different PGM interface here. We're currently using
5651 * generic / REM interfaces. This won't cut it for R0. */
5652 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5653 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5654 * here. */
5655 PGMPTWALK Walk;
5656 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5657 if (RT_FAILURE(rc))
5658 {
5659 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5660 /** @todo Check unassigned memory in unpaged mode. */
5661 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5662#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5663 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5664 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5665#endif
5666 *pGCPhysMem = NIL_RTGCPHYS;
5667 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5668 }
5669
5670 /* If the page is writable and does not have the no-exec bit set, all
5671 access is allowed. Otherwise we'll have to check more carefully... */
5672 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5673 {
5674 /* Write to read only memory? */
5675 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5676 && !(Walk.fEffective & X86_PTE_RW)
5677 && ( ( IEM_GET_CPL(pVCpu) == 3
5678 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5679 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5680 {
5681 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5682 *pGCPhysMem = NIL_RTGCPHYS;
5683#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5684 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5685 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5686#endif
5687 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5688 }
5689
5690 /* Kernel memory accessed by userland? */
5691 if ( !(Walk.fEffective & X86_PTE_US)
5692 && IEM_GET_CPL(pVCpu) == 3
5693 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5694 {
5695 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5696 *pGCPhysMem = NIL_RTGCPHYS;
5697#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5698 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5699 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5700#endif
5701 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5702 }
5703
5704 /* Executing non-executable memory? */
5705 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5706 && (Walk.fEffective & X86_PTE_PAE_NX)
5707 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5708 {
5709 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5710 *pGCPhysMem = NIL_RTGCPHYS;
5711#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5712 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5713 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5714#endif
5715 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5716 VERR_ACCESS_DENIED);
5717 }
5718 }
5719
5720 /*
5721 * Set the dirty / access flags.
5722 * ASSUMES this is set when the address is translated rather than on commit...
5723 */
5724 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5725 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5726 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5727 {
5728 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5729 AssertRC(rc2);
5730 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5731 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5732 }
5733
5734 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5735 *pGCPhysMem = GCPhys;
5736 return VINF_SUCCESS;
5737}
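
/*
 * Illustrative sketch (not part of the build): the physical address returned
 * above is the walked page frame with the low page-offset bits of the virtual
 * address merged back in.  The constants assume the standard 4 KiB guest page
 * size and are made up for the example.
 */
#if 0 /* example only */
static void iemExampleGCPhysCompose(void)
{
    RTGCPHYS const GCPhysPage = UINT64_C(0x0000000123456000); /* from the page walk  */
    RTGCPTR  const GCPtrMem   = UINT64_C(0x00007fff00000a30); /* the virtual address */
    RTGCPHYS const GCPhys     = GCPhysPage | (GCPtrMem & GUEST_PAGE_OFFSET_MASK); /* 0x123456a30 */
    RT_NOREF(GCPhys);
}
#endif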
5738
5739#if 0 /*unused*/
5740/**
5741 * Looks up a memory mapping entry.
5742 *
5743 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5745 * @param pvMem The memory address.
5746 * @param fAccess The access to.
5747 */
5748DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5749{
5750 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5751 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5752 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5753 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5754 return 0;
5755 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5756 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5757 return 1;
5758 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5759 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5760 return 2;
5761 return VERR_NOT_FOUND;
5762}
5763#endif
5764
5765/**
5766 * Finds a free memmap entry when using iNextMapping doesn't work.
5767 *
5768 * @returns Memory mapping index, 1024 on failure.
5769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5770 */
5771static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5772{
5773 /*
5774 * The easy case.
5775 */
5776 if (pVCpu->iem.s.cActiveMappings == 0)
5777 {
5778 pVCpu->iem.s.iNextMapping = 1;
5779 return 0;
5780 }
5781
5782 /* There should be enough mappings for all instructions. */
5783 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5784
5785 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5786 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5787 return i;
5788
5789 AssertFailedReturn(1024);
5790}
5791
5792
5793/**
5794 * Commits a bounce buffer that needs writing back and unmaps it.
5795 *
5796 * @returns Strict VBox status code.
5797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5798 * @param iMemMap The index of the buffer to commit.
5799 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5800 * Always false in ring-3, obviously.
5801 */
5802static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5803{
5804 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5805 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5806#ifdef IN_RING3
5807 Assert(!fPostponeFail);
5808 RT_NOREF_PV(fPostponeFail);
5809#endif
5810
5811 /*
5812 * Do the writing.
5813 */
5814 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5815 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5816 {
5817 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5818 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5819 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5820 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5821 {
5822 /*
5823 * Carefully and efficiently dealing with access handler return
5824 * codes makes this a little bloated.
5825 */
5826 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5827 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5828 pbBuf,
5829 cbFirst,
5830 PGMACCESSORIGIN_IEM);
5831 if (rcStrict == VINF_SUCCESS)
5832 {
5833 if (cbSecond)
5834 {
5835 rcStrict = PGMPhysWrite(pVM,
5836 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5837 pbBuf + cbFirst,
5838 cbSecond,
5839 PGMACCESSORIGIN_IEM);
5840 if (rcStrict == VINF_SUCCESS)
5841 { /* nothing */ }
5842 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5843 {
5844 LogEx(LOG_GROUP_IEM,
5845 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5848 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5849 }
5850#ifndef IN_RING3
5851 else if (fPostponeFail)
5852 {
5853 LogEx(LOG_GROUP_IEM,
5854 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5857 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5858 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5859 return iemSetPassUpStatus(pVCpu, rcStrict);
5860 }
5861#endif
5862 else
5863 {
5864 LogEx(LOG_GROUP_IEM,
5865 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5868 return rcStrict;
5869 }
5870 }
5871 }
5872 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5873 {
5874 if (!cbSecond)
5875 {
5876 LogEx(LOG_GROUP_IEM,
5877 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5879 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5880 }
5881 else
5882 {
5883 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5885 pbBuf + cbFirst,
5886 cbSecond,
5887 PGMACCESSORIGIN_IEM);
5888 if (rcStrict2 == VINF_SUCCESS)
5889 {
5890 LogEx(LOG_GROUP_IEM,
5891 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5892 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5893 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5894 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5895 }
5896 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5897 {
5898 LogEx(LOG_GROUP_IEM,
5899 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5902 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5903 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5904 }
5905#ifndef IN_RING3
5906 else if (fPostponeFail)
5907 {
5908 LogEx(LOG_GROUP_IEM,
5909 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5912 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5913 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5914 return iemSetPassUpStatus(pVCpu, rcStrict);
5915 }
5916#endif
5917 else
5918 {
5919 LogEx(LOG_GROUP_IEM,
5920 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5923 return rcStrict2;
5924 }
5925 }
5926 }
5927#ifndef IN_RING3
5928 else if (fPostponeFail)
5929 {
5930 LogEx(LOG_GROUP_IEM,
5931 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5933 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5934 if (!cbSecond)
5935 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5936 else
5937 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5938 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5939 return iemSetPassUpStatus(pVCpu, rcStrict);
5940 }
5941#endif
5942 else
5943 {
5944 LogEx(LOG_GROUP_IEM,
5945 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5946 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5947 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5948 return rcStrict;
5949 }
5950 }
5951 else
5952 {
5953 /*
5954 * No access handlers, much simpler.
5955 */
5956 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5957 if (RT_SUCCESS(rc))
5958 {
5959 if (cbSecond)
5960 {
5961 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5962 if (RT_SUCCESS(rc))
5963 { /* likely */ }
5964 else
5965 {
5966 LogEx(LOG_GROUP_IEM,
5967 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5968 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5969 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5970 return rc;
5971 }
5972 }
5973 }
5974 else
5975 {
5976 LogEx(LOG_GROUP_IEM,
5977 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5978 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5979 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5980 return rc;
5981 }
5982 }
5983 }
5984
5985#if defined(IEM_LOG_MEMORY_WRITES)
5986 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5987 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5988 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5989 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5990 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5991 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5992
5993 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5994 g_cbIemWrote = cbWrote;
5995 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5996#endif
5997
5998 /*
5999 * Free the mapping entry.
6000 */
6001 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6002 Assert(pVCpu->iem.s.cActiveMappings != 0);
6003 pVCpu->iem.s.cActiveMappings--;
6004 return VINF_SUCCESS;
6005}
6006
6007
6008/**
6009 * iemMemMap worker that deals with a request crossing pages.
6010 */
6011static VBOXSTRICTRC
6012iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6013 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6014{
6015 Assert(cbMem <= GUEST_PAGE_SIZE);
6016
6017 /*
6018 * Do the address translations.
6019 */
6020 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6021 RTGCPHYS GCPhysFirst;
6022 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6023 if (rcStrict != VINF_SUCCESS)
6024 return rcStrict;
6025 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6026
6027 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6028 RTGCPHYS GCPhysSecond;
6029 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6030 cbSecondPage, fAccess, &GCPhysSecond);
6031 if (rcStrict != VINF_SUCCESS)
6032 return rcStrict;
6033 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6034 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6035
6036 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6037
6038 /*
6039 * Read in the current memory content if it's a read, execute or partial
6040 * write access.
6041 */
6042 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6043
6044 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6045 {
6046 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6047 {
6048 /*
6049 * Must carefully deal with access handler status codes here,
6050              * which makes the code a bit bloated.
6051 */
6052 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6053 if (rcStrict == VINF_SUCCESS)
6054 {
6055 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6056 if (rcStrict == VINF_SUCCESS)
6057 { /*likely */ }
6058 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6059 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6060 else
6061 {
6062                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6063 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6064 return rcStrict;
6065 }
6066 }
6067 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6068 {
6069 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6070 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6071 {
6072 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6074 }
6075 else
6076 {
6077 LogEx(LOG_GROUP_IEM,
6078                            ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6079                             GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6080 return rcStrict2;
6081 }
6082 }
6083 else
6084 {
6085                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6086 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6087 return rcStrict;
6088 }
6089 }
6090 else
6091 {
6092 /*
6093              * No informational status codes here, much more straightforward.
6094 */
6095 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6096 if (RT_SUCCESS(rc))
6097 {
6098 Assert(rc == VINF_SUCCESS);
6099 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6100 if (RT_SUCCESS(rc))
6101 Assert(rc == VINF_SUCCESS);
6102 else
6103 {
6104 LogEx(LOG_GROUP_IEM,
6105                        ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6106 return rc;
6107 }
6108 }
6109 else
6110 {
6111 LogEx(LOG_GROUP_IEM,
6112                    ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6113 return rc;
6114 }
6115 }
6116 }
6117#ifdef VBOX_STRICT
6118 else
6119 memset(pbBuf, 0xcc, cbMem);
6120 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6121 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6122#endif
6123 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6124
6125 /*
6126 * Commit the bounce buffer entry.
6127 */
6128 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6129 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6130 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6131 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6132 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6133 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6134 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6135 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6136 pVCpu->iem.s.cActiveMappings++;
6137
6138 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6139 *ppvMem = pbBuf;
6140 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6141 return VINF_SUCCESS;
6142}
6143
6144
6145/**
6146 * iemMemMap worker that deals with iemMemPageMap failures.
6147 */
6148static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6149 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6150{
6151 /*
6152 * Filter out conditions we can handle and the ones which shouldn't happen.
6153 */
6154 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6155 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6156 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6157 {
6158 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6159 return rcMap;
6160 }
6161 pVCpu->iem.s.cPotentialExits++;
6162
6163 /*
6164 * Read in the current memory content if it's a read, execute or partial
6165 * write access.
6166 */
6167 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6168 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6169 {
6170 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6171 memset(pbBuf, 0xff, cbMem);
6172 else
6173 {
6174 int rc;
6175 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6176 {
6177 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6178 if (rcStrict == VINF_SUCCESS)
6179 { /* nothing */ }
6180 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6181 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6182 else
6183 {
6184 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6185 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6186 return rcStrict;
6187 }
6188 }
6189 else
6190 {
6191 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6192 if (RT_SUCCESS(rc))
6193 { /* likely */ }
6194 else
6195 {
6196 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6197 GCPhysFirst, rc));
6198 return rc;
6199 }
6200 }
6201 }
6202 }
6203#ifdef VBOX_STRICT
6204 else
6205 memset(pbBuf, 0xcc, cbMem);
6208 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6209 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6210#endif
6211
6212 /*
6213 * Commit the bounce buffer entry.
6214 */
6215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6216 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6217 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6218 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6219 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6220 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6221 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6222 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6223 pVCpu->iem.s.cActiveMappings++;
6224
6225 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6226 *ppvMem = pbBuf;
6227 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6228 return VINF_SUCCESS;
6229}
6230
6231
6232
6233/**
6234 * Maps the specified guest memory for the given kind of access.
6235 *
6236 * This may be using bounce buffering of the memory if it's crossing a page
6237 * boundary or if there is an access handler installed for any of it. Because
6238 * of lock prefix guarantees, we're in for some extra clutter when this
6239 * happens.
6240 *
6241 * This may raise a \#GP, \#SS, \#PF or \#AC.
6242 *
6243 * @returns VBox strict status code.
6244 *
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param ppvMem Where to return the pointer to the mapped memory.
6247 * @param pbUnmapInfo Where to return unmap info to be passed to
6248 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6249 * done.
6250 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6251 * 8, 12, 16, 32 or 512. When used by string operations
6252 * it can be up to a page.
6253 * @param iSegReg The index of the segment register to use for this
6254 * access. The base and limits are checked. Use UINT8_MAX
6255 * to indicate that no segmentation is required (for IDT,
6256 * GDT and LDT accesses).
6257 * @param GCPtrMem The address of the guest memory.
6258 * @param fAccess How the memory is being accessed. The
6259 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6260 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6261 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6262 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6263 * set.
6264 * @param uAlignCtl Alignment control:
6265 * - Bits 15:0 is the alignment mask.
6266 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6267 * IEM_MEMMAP_F_ALIGN_SSE, and
6268 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6269 * Pass zero to skip alignment.
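 *
 * Illustrative usage sketch (mirrors the store helpers further down in this
 * file; u16Value and GCPtrMem stand in for caller supplied values):
 * @code
 *     uint8_t      bUnmapInfo;
 *     uint16_t    *pu16Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, &bUnmapInfo, sizeof(*pu16Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst = u16Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *     }
 * @endcode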
6270 */
6271VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6272 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6273{
6274 /*
6275 * Check the input and figure out which mapping entry to use.
6276 */
6277 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6278 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6279 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6280 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6281 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6282
6283 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6284 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6285 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6286 {
6287 iMemMap = iemMemMapFindFree(pVCpu);
6288 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6289 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6290 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6291 pVCpu->iem.s.aMemMappings[2].fAccess),
6292 VERR_IEM_IPE_9);
6293 }
6294
6295 /*
6296 * Map the memory, checking that we can actually access it. If something
6297 * slightly complicated happens, fall back on bounce buffering.
6298 */
6299 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6300 if (rcStrict == VINF_SUCCESS)
6301 { /* likely */ }
6302 else
6303 return rcStrict;
6304
6305 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6306 { /* likely */ }
6307 else
6308 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6309
6310 /*
6311 * Alignment check.
6312 */
6313 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6314 { /* likelyish */ }
6315 else
6316 {
6317 /* Misaligned access. */
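        /* Rough summary of the checks below: for a non-system access, if no #GP
           was requested (or it is an SSE access with MXCSR.MM set), the misaligned
           access raises #AC only when alignment checks are enabled; otherwise the
           default is #GP(0), with the IEM_MEMMAP_F_ALIGN_GP_OR_AC case optionally
           raising #AC instead (depending on the misalignment and AC state). */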
6318 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6319 {
6320 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6321 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6322 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6323 {
6324 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6325
6326 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6327 return iemRaiseAlignmentCheckException(pVCpu);
6328 }
6329 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6330 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6331 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6332 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6333 * that's what FXSAVE does on a 10980xe. */
6334 && iemMemAreAlignmentChecksEnabled(pVCpu))
6335 return iemRaiseAlignmentCheckException(pVCpu);
6336 else
6337 return iemRaiseGeneralProtectionFault0(pVCpu);
6338 }
6339
6340#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6341     /* If the access is atomic there are host platform alignment restrictions
6342 we need to conform with. */
6343 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6344# if defined(RT_ARCH_AMD64)
6345 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6346# elif defined(RT_ARCH_ARM64)
6347 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6348# else
6349# error port me
6350# endif
6351 )
6352 { /* okay */ }
6353 else
6354 {
6355 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6356 pVCpu->iem.s.cMisalignedAtomics += 1;
6357 return VINF_EM_EMULATE_SPLIT_LOCK;
6358 }
6359#endif
6360 }
6361
6362#ifdef IEM_WITH_DATA_TLB
6363 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6364
6365 /*
6366 * Get the TLB entry for this page.
6367 */
6368 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6369 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6370 if (pTlbe->uTag == uTag)
6371 {
6372# ifdef VBOX_WITH_STATISTICS
6373 pVCpu->iem.s.DataTlb.cTlbHits++;
6374# endif
6375 }
6376 else
6377 {
6378 pVCpu->iem.s.DataTlb.cTlbMisses++;
6379 PGMPTWALK Walk;
6380 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6381 if (RT_FAILURE(rc))
6382 {
6383 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6384# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6385 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6386 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6387# endif
6388 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6389 }
6390
6391 Assert(Walk.fSucceeded);
6392 pTlbe->uTag = uTag;
6393 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6394 pTlbe->GCPhys = Walk.GCPhys;
6395 pTlbe->pbMappingR3 = NULL;
6396 }
6397
6398 /*
6399 * Check TLB page table level access flags.
6400 */
6401 /* If the page is either supervisor only or non-writable, we need to do
6402 more careful access checks. */
6403 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6404 {
6405 /* Write to read only memory? */
6406 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6407 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6408 && ( ( IEM_GET_CPL(pVCpu) == 3
6409 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6410 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6411 {
6412 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6413# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6414 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6415 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6416# endif
6417 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6418 }
6419
6420 /* Kernel memory accessed by userland? */
6421 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6422 && IEM_GET_CPL(pVCpu) == 3
6423 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6424 {
6425 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6426# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6427 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6428 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6429# endif
6430 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6431 }
6432 }
6433
6434 /*
6435 * Set the dirty / access flags.
6436 * ASSUMES this is set when the address is translated rather than on commit...
6437 */
6438 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6439 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6440 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6441 {
6442 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6443 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6444 AssertRC(rc2);
6445 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6446 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6447 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6448 }
6449
6450 /*
6451 * Look up the physical page info if necessary.
6452 */
6453 uint8_t *pbMem = NULL;
6454 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6455# ifdef IN_RING3
6456 pbMem = pTlbe->pbMappingR3;
6457# else
6458 pbMem = NULL;
6459# endif
6460 else
6461 {
6462 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6463 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6464 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6465 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6466 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6467 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6468 { /* likely */ }
6469 else
6470 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6471 pTlbe->pbMappingR3 = NULL;
6472 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6473 | IEMTLBE_F_NO_MAPPINGR3
6474 | IEMTLBE_F_PG_NO_READ
6475 | IEMTLBE_F_PG_NO_WRITE
6476 | IEMTLBE_F_PG_UNASSIGNED
6477 | IEMTLBE_F_PG_CODE_PAGE);
6478 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6479 &pbMem, &pTlbe->fFlagsAndPhysRev);
6480 AssertRCReturn(rc, rc);
6481# ifdef IN_RING3
6482 pTlbe->pbMappingR3 = pbMem;
6483# endif
6484 }
6485
6486 /*
6487 * Check the physical page level access and mapping.
6488 */
6489 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6490 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6491 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6492 { /* probably likely */ }
6493 else
6494 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6495 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6496 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6497 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6498 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6499 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6500
6501 if (pbMem)
6502 {
6503 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6504 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6505 fAccess |= IEM_ACCESS_NOT_LOCKED;
6506 }
6507 else
6508 {
6509 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6510 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6511 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6512 if (rcStrict != VINF_SUCCESS)
6513 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6514 }
6515
6516 void * const pvMem = pbMem;
6517
6518 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6519 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6520 if (fAccess & IEM_ACCESS_TYPE_READ)
6521 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6522
6523#else /* !IEM_WITH_DATA_TLB */
6524
6525 RTGCPHYS GCPhysFirst;
6526 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6527 if (rcStrict != VINF_SUCCESS)
6528 return rcStrict;
6529
6530 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6531 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6532 if (fAccess & IEM_ACCESS_TYPE_READ)
6533 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6534
6535 void *pvMem;
6536 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6537 if (rcStrict != VINF_SUCCESS)
6538 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6539
6540#endif /* !IEM_WITH_DATA_TLB */
6541
6542 /*
6543 * Fill in the mapping table entry.
6544 */
6545 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6547 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6548 pVCpu->iem.s.cActiveMappings += 1;
6549
6550 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6551 *ppvMem = pvMem;
6552 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6553 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6555
6556 return VINF_SUCCESS;
6557}
6558
6559
6560/**
6561 * Commits the guest memory if bounce buffered and unmaps it.
6562 *
6563 * @returns Strict VBox status code.
6564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6565 * @param bUnmapInfo Unmap info set by iemMemMap.
6566 */
6567VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6568{
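    /* The unmap info byte is laid out as encoded by iemMemMap/iemMemMapJmp:
       bits 0-2 hold the mapping index, bit 3 is always set as a validity marker,
       and bits 4-7 hold the IEM_ACCESS_TYPE_MASK portion of fAccess. */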
6569 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6570 AssertMsgReturn( (bUnmapInfo & 0x08)
6571 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6572 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6573 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6574 VERR_NOT_FOUND);
6575
6576 /* If it's bounce buffered, we may need to write back the buffer. */
6577 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6578 {
6579 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6580 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6581 }
6582 /* Otherwise unlock it. */
6583 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6584 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6585
6586 /* Free the entry. */
6587 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6588 Assert(pVCpu->iem.s.cActiveMappings != 0);
6589 pVCpu->iem.s.cActiveMappings--;
6590 return VINF_SUCCESS;
6591}
6592
6593
6594/**
6595 * Rolls back the guest memory (conceptually only) and unmaps it.
6596 *
6597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6598 * @param bUnmapInfo Unmap info set by iemMemMap.
6599 */
6600void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6601{
6602 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6603 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6604 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6605 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6606 == ((unsigned)bUnmapInfo >> 4),
6607 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6608
6609 /* Unlock it if necessary. */
6610 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6611 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6612
6613 /* Free the entry. */
6614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6615 Assert(pVCpu->iem.s.cActiveMappings != 0);
6616 pVCpu->iem.s.cActiveMappings--;
6617}
6618
6619#ifdef IEM_WITH_SETJMP
6620
6621/**
6622 * Maps the specified guest memory for the given kind of access, longjmp on
6623 * error.
6624 *
6625 * This may be using bounce buffering of the memory if it's crossing a page
6626 * boundary or if there is an access handler installed for any of it. Because
6627 * of lock prefix guarantees, we're in for some extra clutter when this
6628 * happens.
6629 *
6630 * This may raise a \#GP, \#SS, \#PF or \#AC.
6631 *
6632 * @returns Pointer to the mapped memory.
6633 *
6634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6635  * @param   pbUnmapInfo         Where to return unmap info to be passed to
6636 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6637 * iemMemCommitAndUnmapWoSafeJmp,
6638 * iemMemCommitAndUnmapRoSafeJmp,
6639 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6640 * when done.
6641 * @param cbMem The number of bytes to map. This is usually 1,
6642 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6643 * string operations it can be up to a page.
6644 * @param iSegReg The index of the segment register to use for
6645 * this access. The base and limits are checked.
6646 * Use UINT8_MAX to indicate that no segmentation
6647 * is required (for IDT, GDT and LDT accesses).
6648 * @param GCPtrMem The address of the guest memory.
6649 * @param fAccess How the memory is being accessed. The
6650 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6651 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6652 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6653 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6654 * set.
6655 * @param uAlignCtl Alignment control:
6656 * - Bits 15:0 is the alignment mask.
6657 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6658 * IEM_MEMMAP_F_ALIGN_SSE, and
6659 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6660 * Pass zero to skip alignment.
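 *
 * Illustrative usage sketch (mirrors the *Jmp store helpers further down in
 * this file; u64Value and GCPtrMem stand in for caller supplied values):
 * @code
 *     uint8_t   bUnmapInfo;
 *     uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu64Dst), X86_SREG_DS,
 *                                                  GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
 *     *pu64Dst = u64Value;
 *     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 * @endcode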
6661 */
6662void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6663 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6664{
6665 /*
6666 * Check the input, check segment access and adjust address
6667 * with segment base.
6668 */
6669 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6670 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6671 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6672
6673 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6674 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6675 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6676
6677 /*
6678 * Alignment check.
6679 */
6680 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6681 { /* likelyish */ }
6682 else
6683 {
6684 /* Misaligned access. */
6685 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6686 {
6687 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6688 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6689 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6690 {
6691 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6692
6693 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6694 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6695 }
6696 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6697 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6698 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6699 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6700 * that's what FXSAVE does on a 10980xe. */
6701 && iemMemAreAlignmentChecksEnabled(pVCpu))
6702 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6703 else
6704 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6705 }
6706
6707#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6708     /* If the access is atomic there are host platform alignment restrictions
6709 we need to conform with. */
6710 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6711# if defined(RT_ARCH_AMD64)
6712 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6713# elif defined(RT_ARCH_ARM64)
6714 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6715# else
6716# error port me
6717# endif
6718 )
6719 { /* okay */ }
6720 else
6721 {
6722 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6723 pVCpu->iem.s.cMisalignedAtomics += 1;
6724 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6725 }
6726#endif
6727 }
6728
6729 /*
6730 * Figure out which mapping entry to use.
6731 */
6732 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6733 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6734 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6735 {
6736 iMemMap = iemMemMapFindFree(pVCpu);
6737 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6738 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6739 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6740 pVCpu->iem.s.aMemMappings[2].fAccess),
6741 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6742 }
6743
6744 /*
6745 * Crossing a page boundary?
6746 */
6747 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6748 { /* No (likely). */ }
6749 else
6750 {
6751 void *pvMem;
6752 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6753 if (rcStrict == VINF_SUCCESS)
6754 return pvMem;
6755 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6756 }
6757
6758#ifdef IEM_WITH_DATA_TLB
6759 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6760
6761 /*
6762 * Get the TLB entry for this page.
6763 */
6764 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6765 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6766 if (pTlbe->uTag == uTag)
6767 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6768 else
6769 {
6770 pVCpu->iem.s.DataTlb.cTlbMisses++;
6771 PGMPTWALK Walk;
6772 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6773 if (RT_FAILURE(rc))
6774 {
6775 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6776# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6777 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6778 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6779# endif
6780 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6781 }
6782
6783 Assert(Walk.fSucceeded);
6784 pTlbe->uTag = uTag;
6785 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6786 pTlbe->GCPhys = Walk.GCPhys;
6787 pTlbe->pbMappingR3 = NULL;
6788 }
6789
6790 /*
6791 * Check the flags and physical revision.
6792 */
6793 /** @todo make the caller pass these in with fAccess. */
6794 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6795 ? IEMTLBE_F_PT_NO_USER : 0;
6796 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6797 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6798 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6799 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6800 ? IEMTLBE_F_PT_NO_WRITE : 0)
6801 : 0;
6802 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6803 uint8_t *pbMem = NULL;
6804 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6805 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6806# ifdef IN_RING3
6807 pbMem = pTlbe->pbMappingR3;
6808# else
6809 pbMem = NULL;
6810# endif
6811 else
6812 {
6813 /*
6814 * Okay, something isn't quite right or needs refreshing.
6815 */
6816 /* Write to read only memory? */
6817 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6818 {
6819 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6820# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6821 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6822 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6823# endif
6824 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6825 }
6826
6827 /* Kernel memory accessed by userland? */
6828 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6829 {
6830 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6831# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6832 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6833 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6834# endif
6835 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6836 }
6837
6838 /* Set the dirty / access flags.
6839 ASSUMES this is set when the address is translated rather than on commit... */
6840 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6841 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6842 {
6843 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6844 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6845 AssertRC(rc2);
6846 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6847 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6848 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6849 }
6850
6851 /*
6852 * Check if the physical page info needs updating.
6853 */
6854 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6855# ifdef IN_RING3
6856 pbMem = pTlbe->pbMappingR3;
6857# else
6858 pbMem = NULL;
6859# endif
6860 else
6861 {
6862 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6863 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6864 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6865 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6866 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6867 pTlbe->pbMappingR3 = NULL;
6868 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6869 | IEMTLBE_F_NO_MAPPINGR3
6870 | IEMTLBE_F_PG_NO_READ
6871 | IEMTLBE_F_PG_NO_WRITE
6872 | IEMTLBE_F_PG_UNASSIGNED
6873 | IEMTLBE_F_PG_CODE_PAGE);
6874 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6875 &pbMem, &pTlbe->fFlagsAndPhysRev);
6876 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6877# ifdef IN_RING3
6878 pTlbe->pbMappingR3 = pbMem;
6879# endif
6880 }
6881
6882 /*
6883 * Check the physical page level access and mapping.
6884 */
6885 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6886 { /* probably likely */ }
6887 else
6888 {
6889 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6890 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6891 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6892 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6893 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6894 if (rcStrict == VINF_SUCCESS)
6895 return pbMem;
6896 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6897 }
6898 }
6899 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6900
6901 if (pbMem)
6902 {
6903 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6904 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6905 fAccess |= IEM_ACCESS_NOT_LOCKED;
6906 }
6907 else
6908 {
6909 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6910 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6911 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6912 if (rcStrict == VINF_SUCCESS)
6913 {
6914 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6915 return pbMem;
6916 }
6917 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6918 }
6919
6920 void * const pvMem = pbMem;
6921
6922 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6923 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6924 if (fAccess & IEM_ACCESS_TYPE_READ)
6925 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6926
6927#else /* !IEM_WITH_DATA_TLB */
6928
6929
6930 RTGCPHYS GCPhysFirst;
6931 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6932 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6933 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6934
6935 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6936 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6937 if (fAccess & IEM_ACCESS_TYPE_READ)
6938 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6939
6940 void *pvMem;
6941 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6942 if (rcStrict == VINF_SUCCESS)
6943 { /* likely */ }
6944 else
6945 {
6946 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6947 if (rcStrict == VINF_SUCCESS)
6948 return pvMem;
6949 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6950 }
6951
6952#endif /* !IEM_WITH_DATA_TLB */
6953
6954 /*
6955 * Fill in the mapping table entry.
6956 */
6957 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6958 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6959 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6960 pVCpu->iem.s.cActiveMappings++;
6961
6962 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6963
6964 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6965 return pvMem;
6966}
6967
6968
6969/**
6970 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6971 *
6972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6973 * @param pvMem The mapping.
6974 * @param   bUnmapInfo  Unmap info set by iemMemMap or iemMemMapJmp.
6976void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6977{
6978 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6979 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6980 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6981 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6982 == ((unsigned)bUnmapInfo >> 4),
6983 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6984
6985 /* If it's bounce buffered, we may need to write back the buffer. */
6986 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6987 {
6988 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6989 {
6990 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6991 if (rcStrict == VINF_SUCCESS)
6992 return;
6993 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6994 }
6995 }
6996 /* Otherwise unlock it. */
6997 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6998 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6999
7000 /* Free the entry. */
7001 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7002 Assert(pVCpu->iem.s.cActiveMappings != 0);
7003 pVCpu->iem.s.cActiveMappings--;
7004}
7005
7006
7007/** Fallback for iemMemCommitAndUnmapRwJmp. */
7008void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7009{
7010 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7011 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7012}
7013
7014
7015/** Fallback for iemMemCommitAndUnmapAtJmp. */
7016void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7017{
7018 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7019 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7020}
7021
7022
7023/** Fallback for iemMemCommitAndUnmapWoJmp. */
7024void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7025{
7026 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7027 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7028}
7029
7030
7031/** Fallback for iemMemCommitAndUnmapRoJmp. */
7032void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7033{
7034 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7035 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7036}
7037
7038
7039/** Fallback for iemMemRollbackAndUnmapWo. */
7040void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7041{
7042 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7043 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7044}
7045
7046#endif /* IEM_WITH_SETJMP */
7047
7048#ifndef IN_RING3
7049/**
7050 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7051 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and such).
7052 *
7053 * Allows the instruction to be completed and retired, while the IEM user will
7054 * return to ring-3 immediately afterwards and do the postponed writes there.
7055 *
7056 * @returns VBox status code (no strict statuses). Caller must check
7057 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7059 * @param   bUnmapInfo  Unmap info set by iemMemMap.
7061 */
7062VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7063{
7064 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7065 AssertMsgReturn( (bUnmapInfo & 0x08)
7066 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7067 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7068 == ((unsigned)bUnmapInfo >> 4),
7069 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7070 VERR_NOT_FOUND);
7071
7072 /* If it's bounce buffered, we may need to write back the buffer. */
7073 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7074 {
7075 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7076 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7077 }
7078 /* Otherwise unlock it. */
7079 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7080 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7081
7082 /* Free the entry. */
7083 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7084 Assert(pVCpu->iem.s.cActiveMappings != 0);
7085 pVCpu->iem.s.cActiveMappings--;
7086 return VINF_SUCCESS;
7087}
7088#endif
7089
7090
7091/**
7092 * Rolls back mappings, releasing page locks and such.
7093 *
7094 * The caller shall only call this after checking cActiveMappings.
7095 *
7096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7097 */
7098void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7099{
7100 Assert(pVCpu->iem.s.cActiveMappings > 0);
7101
7102 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7103 while (iMemMap-- > 0)
7104 {
7105 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7106 if (fAccess != IEM_ACCESS_INVALID)
7107 {
7108 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7109 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7110 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7111 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7112 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7113 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7114 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7115 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7116 pVCpu->iem.s.cActiveMappings--;
7117 }
7118 }
7119}
7120
7121
7122/*
7123 * Instantiate R/W templates.
7124 */
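/* Each TMPL_MEM_* block below pulls in IEMAllMemRWTmpl.cpp.h to generate the
   type specific fetch/store helpers (e.g. iemMemFetchDataU16 and iemMemFetchDataU32
   used by iemMemFetchDataXdtr further down), and presumably the stack push/pop
   variants as well while TMPL_MEM_WITH_STACK is defined. */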
7125#define TMPL_MEM_WITH_STACK
7126
7127#define TMPL_MEM_TYPE uint8_t
7128#define TMPL_MEM_FN_SUFF U8
7129#define TMPL_MEM_FMT_TYPE "%#04x"
7130#define TMPL_MEM_FMT_DESC "byte"
7131#include "IEMAllMemRWTmpl.cpp.h"
7132
7133#define TMPL_MEM_TYPE uint16_t
7134#define TMPL_MEM_FN_SUFF U16
7135#define TMPL_MEM_FMT_TYPE "%#06x"
7136#define TMPL_MEM_FMT_DESC "word"
7137#include "IEMAllMemRWTmpl.cpp.h"
7138
7139#define TMPL_WITH_PUSH_SREG
7140#define TMPL_MEM_TYPE uint32_t
7141#define TMPL_MEM_FN_SUFF U32
7142#define TMPL_MEM_FMT_TYPE "%#010x"
7143#define TMPL_MEM_FMT_DESC "dword"
7144#include "IEMAllMemRWTmpl.cpp.h"
7145#undef TMPL_WITH_PUSH_SREG
7146
7147#define TMPL_MEM_TYPE uint64_t
7148#define TMPL_MEM_FN_SUFF U64
7149#define TMPL_MEM_FMT_TYPE "%#018RX64"
7150#define TMPL_MEM_FMT_DESC "qword"
7151#include "IEMAllMemRWTmpl.cpp.h"
7152
7153#undef TMPL_MEM_WITH_STACK
7154
7155#define TMPL_MEM_TYPE uint64_t
7156#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7157#define TMPL_MEM_FN_SUFF U64AlignedU128
7158#define TMPL_MEM_FMT_TYPE "%#018RX64"
7159#define TMPL_MEM_FMT_DESC "qword"
7160#include "IEMAllMemRWTmpl.cpp.h"
7161
7162/* See IEMAllMemRWTmplInline.cpp.h */
7163#define TMPL_MEM_BY_REF
7164
7165#define TMPL_MEM_TYPE RTFLOAT80U
7166#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7167#define TMPL_MEM_FN_SUFF R80
7168#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7169#define TMPL_MEM_FMT_DESC "tword"
7170#include "IEMAllMemRWTmpl.cpp.h"
7171
7172#define TMPL_MEM_TYPE RTPBCD80U
7173#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7174#define TMPL_MEM_FN_SUFF D80
7175#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7176#define TMPL_MEM_FMT_DESC "tword"
7177#include "IEMAllMemRWTmpl.cpp.h"
7178
7179#define TMPL_MEM_TYPE RTUINT128U
7180#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7181#define TMPL_MEM_FN_SUFF U128
7182#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7183#define TMPL_MEM_FMT_DESC "dqword"
7184#include "IEMAllMemRWTmpl.cpp.h"
7185
7186#define TMPL_MEM_TYPE RTUINT128U
7187#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7188#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7189#define TMPL_MEM_FN_SUFF U128AlignedSse
7190#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7191#define TMPL_MEM_FMT_DESC "dqword"
7192#include "IEMAllMemRWTmpl.cpp.h"
7193
7194#define TMPL_MEM_TYPE RTUINT128U
7195#define TMPL_MEM_TYPE_ALIGN 0
7196#define TMPL_MEM_FN_SUFF U128NoAc
7197#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7198#define TMPL_MEM_FMT_DESC "dqword"
7199#include "IEMAllMemRWTmpl.cpp.h"
7200
7201#define TMPL_MEM_TYPE RTUINT256U
7202#define TMPL_MEM_TYPE_ALIGN 0
7203#define TMPL_MEM_FN_SUFF U256NoAc
7204#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7205#define TMPL_MEM_FMT_DESC "qqword"
7206#include "IEMAllMemRWTmpl.cpp.h"
7207
7208#define TMPL_MEM_TYPE RTUINT256U
7209#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7210#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7211#define TMPL_MEM_FN_SUFF U256AlignedAvx
7212#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7213#define TMPL_MEM_FMT_DESC "qqword"
7214#include "IEMAllMemRWTmpl.cpp.h"
7215
7216/**
7217 * Fetches a data dword and zero extends it to a qword.
7218 *
7219 * @returns Strict VBox status code.
7220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7221 * @param pu64Dst Where to return the qword.
7222 * @param iSegReg The index of the segment register to use for
7223 * this access. The base and limits are checked.
7224 * @param GCPtrMem The address of the guest memory.
7225 */
7226VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7227{
7228 /* The lazy approach for now... */
7229 uint8_t bUnmapInfo;
7230 uint32_t const *pu32Src;
7231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7232 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7233 if (rc == VINF_SUCCESS)
7234 {
7235 *pu64Dst = *pu32Src;
7236 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7237 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7238 }
7239 return rc;
7240}
7241
7242
7243#ifdef SOME_UNUSED_FUNCTION
7244/**
7245 * Fetches a data dword and sign extends it to a qword.
7246 *
7247 * @returns Strict VBox status code.
7248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7249 * @param pu64Dst Where to return the sign extended value.
7250 * @param iSegReg The index of the segment register to use for
7251 * this access. The base and limits are checked.
7252 * @param GCPtrMem The address of the guest memory.
7253 */
7254VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7255{
7256 /* The lazy approach for now... */
7257 uint8_t bUnmapInfo;
7258 int32_t const *pi32Src;
7259 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7260 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7261 if (rc == VINF_SUCCESS)
7262 {
7263 *pu64Dst = *pi32Src;
7264 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7265 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7266 }
7267#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7268 else
7269 *pu64Dst = 0;
7270#endif
7271 return rc;
7272}
7273#endif
7274
7275
7276/**
7277 * Fetches a descriptor register (lgdt, lidt).
7278 *
7279 * @returns Strict VBox status code.
7280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7281 * @param pcbLimit Where to return the limit.
7282 * @param pGCPtrBase Where to return the base.
7283 * @param iSegReg The index of the segment register to use for
7284 * this access. The base and limits are checked.
7285 * @param GCPtrMem The address of the guest memory.
7286 * @param enmOpSize The effective operand size.
7287 */
7288VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7289 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7290{
7291 /*
7292 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7293 * little special:
7294 * - The two reads are done separately.
7295      *   - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7296 * - We suspect the 386 to actually commit the limit before the base in
7297 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7298      *     don't try to emulate this eccentric behavior, because it's not well
7299 * enough understood and rather hard to trigger.
7300 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7301 */
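    /* Memory layout reminder: the pseudo-descriptor fetched here is a 16-bit
       limit at offset 0 followed by the base at offset 2 (64-bit in long mode,
       32-bit otherwise, with only 24 bits used for 16-bit operand size), which
       is why the second fetch below uses GCPtrMem + 2. */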
7302 VBOXSTRICTRC rcStrict;
7303 if (IEM_IS_64BIT_CODE(pVCpu))
7304 {
7305 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7306 if (rcStrict == VINF_SUCCESS)
7307 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7308 }
7309 else
7310 {
7311 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7312 if (enmOpSize == IEMMODE_32BIT)
7313 {
7314 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7315 {
7316 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7317 if (rcStrict == VINF_SUCCESS)
7318 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7319 }
7320 else
7321 {
7322 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7323 if (rcStrict == VINF_SUCCESS)
7324 {
7325 *pcbLimit = (uint16_t)uTmp;
7326 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7327 }
7328 }
7329 if (rcStrict == VINF_SUCCESS)
7330 *pGCPtrBase = uTmp;
7331 }
7332 else
7333 {
7334 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7335 if (rcStrict == VINF_SUCCESS)
7336 {
7337 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7338 if (rcStrict == VINF_SUCCESS)
7339 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7340 }
7341 }
7342 }
7343 return rcStrict;
7344}
7345
7346
7347/**
7348 * Stores a data dqword, SSE aligned.
7349 *
7350 * @returns Strict VBox status code.
7351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7352 * @param iSegReg The index of the segment register to use for
7353 * this access. The base and limits are checked.
7354 * @param GCPtrMem The address of the guest memory.
7355 * @param u128Value The value to store.
7356 */
7357VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7358{
7359 /* The lazy approach for now... */
7360 uint8_t bUnmapInfo;
7361 PRTUINT128U pu128Dst;
7362 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7363 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7364 if (rc == VINF_SUCCESS)
7365 {
7366 pu128Dst->au64[0] = u128Value.au64[0];
7367 pu128Dst->au64[1] = u128Value.au64[1];
7368 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7369 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7370 }
7371 return rc;
7372}
7373
7374
7375#ifdef IEM_WITH_SETJMP
7376/**
7377 * Stores a data dqword, SSE aligned, longjmp on error.
7378 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param iSegReg The index of the segment register to use for
7382 * this access. The base and limits are checked.
7383 * @param GCPtrMem The address of the guest memory.
7384 * @param u128Value The value to store.
7385 */
7386void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7387 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7388{
7389 /* The lazy approach for now... */
7390 uint8_t bUnmapInfo;
7391 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7392 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7393 pu128Dst->au64[0] = u128Value.au64[0];
7394 pu128Dst->au64[1] = u128Value.au64[1];
7395 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7396 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7397}
7398#endif
7399
7400
7401/**
7402 * Stores a data qqword.
7403 *
7404 * @returns Strict VBox status code.
7405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7406 * @param iSegReg The index of the segment register to use for
7407 * this access. The base and limits are checked.
7408 * @param GCPtrMem The address of the guest memory.
7409 * @param pu256Value Pointer to the value to store.
7410 */
7411VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7412{
7413 /* The lazy approach for now... */
7414 uint8_t bUnmapInfo;
7415 PRTUINT256U pu256Dst;
7416 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7417 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7418 if (rc == VINF_SUCCESS)
7419 {
7420 pu256Dst->au64[0] = pu256Value->au64[0];
7421 pu256Dst->au64[1] = pu256Value->au64[1];
7422 pu256Dst->au64[2] = pu256Value->au64[2];
7423 pu256Dst->au64[3] = pu256Value->au64[3];
7424 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7425 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7426 }
7427 return rc;
7428}
7429
7430
7431#ifdef IEM_WITH_SETJMP
7432/**
7433 * Stores a data qqword, longjmp on error.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 * @param iSegReg The index of the segment register to use for
7437 * this access. The base and limits are checked.
7438 * @param GCPtrMem The address of the guest memory.
7439 * @param pu256Value Pointer to the value to store.
7440 */
7441void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7442{
7443 /* The lazy approach for now... */
7444 uint8_t bUnmapInfo;
7445 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7446 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7447 pu256Dst->au64[0] = pu256Value->au64[0];
7448 pu256Dst->au64[1] = pu256Value->au64[1];
7449 pu256Dst->au64[2] = pu256Value->au64[2];
7450 pu256Dst->au64[3] = pu256Value->au64[3];
7451 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7452 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7453}
7454#endif
7455
7456
7457/**
7458 * Stores a descriptor register (sgdt, sidt).
7459 *
7460 * @returns Strict VBox status code.
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 * @param cbLimit The limit.
7463 * @param GCPtrBase The base address.
7464 * @param iSegReg The index of the segment register to use for
7465 * this access. The base and limits are checked.
7466 * @param GCPtrMem The address of the guest memory.
7467 */
7468VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7469{
7470 /*
7471 * The SIDT and SGDT instructions actually store the data using two
7472 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7473 * do not respond to operand size prefixes.
7474 */
7475 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7476 if (rcStrict == VINF_SUCCESS)
7477 {
7478 if (IEM_IS_16BIT_CODE(pVCpu))
7479 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7480 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7481 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7482 else if (IEM_IS_32BIT_CODE(pVCpu))
7483 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7484 else
7485 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7486 }
7487 return rcStrict;
7488}
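/*
 * A minimal usage sketch (illustrative only): roughly how an SGDT style
 * emulation might hand the guest GDTR to iemMemStoreDataXdtr. The iEffSeg and
 * GCPtrEffDst names are assumed to come from the instruction decoder and are
 * not defined here.
 *
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu,
 *                                                  pVCpu->cpum.GstCtx.gdtr.cbGdt,
 *                                                  pVCpu->cpum.GstCtx.gdtr.pGdt,
 *                                                  iEffSeg, GCPtrEffDst);
 */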
7489
7490
7491/**
7492 * Begin a special stack push (used by interrupt, exceptions and such).
7493 *
7494 * This will raise \#SS or \#PF if appropriate.
7495 *
7496 * @returns Strict VBox status code.
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 * @param cbMem The number of bytes to push onto the stack.
7499 * @param cbAlign The alignment mask (7, 3, 1).
7500 * @param ppvMem Where to return the pointer to the stack memory.
7501 * As with the other memory functions this could be
7502 * direct access or bounce buffered access, so
7503 * don't commit register until the commit call
7504 * succeeds.
7505 * @param pbUnmapInfo Where to store unmap info for
7506 * iemMemStackPushCommitSpecial.
7507 * @param puNewRsp Where to return the new RSP value. This must be
7508 * passed unchanged to
7509 * iemMemStackPushCommitSpecial().
7510 */
7511VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7512 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7513{
7514 Assert(cbMem < UINT8_MAX);
7515 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7516 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7517}
7518
7519
7520/**
7521 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7522 *
7523 * This will update the rSP.
7524 *
7525 * @returns Strict VBox status code.
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7528 * @param uNewRsp The new RSP value returned by
7529 * iemMemStackPushBeginSpecial().
7530 */
7531VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7532{
7533 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7534 if (rcStrict == VINF_SUCCESS)
7535 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7536 return rcStrict;
7537}
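/*
 * A minimal usage sketch (illustrative only) of the push begin/commit pairing
 * described above; the local names and the single qword frame are made up for
 * the example.
 *
 *      void    *pvFrame;
 *      uint8_t  bUnmapInfo;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvFrame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvFrame = uValueToPush;  // fill the (possibly bounce buffered) frame
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits and updates RSP
 *      }
 */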
7538
7539
7540/**
7541 * Begin a special stack pop (used by iret, retf and such).
7542 *
7543 * This will raise \#SS or \#PF if appropriate.
7544 *
7545 * @returns Strict VBox status code.
7546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7547 * @param cbMem The number of bytes to pop from the stack.
7548 * @param cbAlign The alignment mask (7, 3, 1).
7549 * @param ppvMem Where to return the pointer to the stack memory.
7550 * @param pbUnmapInfo Where to store unmap info for
7551 * iemMemStackPopDoneSpecial.
7552 * @param puNewRsp Where to return the new RSP value. This must be
7553 * assigned to CPUMCTX::rsp manually some time
7554 * after iemMemStackPopDoneSpecial() has been
7555 * called.
7556 */
7557VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7558 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7559{
7560 Assert(cbMem < UINT8_MAX);
7561 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7562 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7563}
7564
7565
7566/**
7567 * Continue a special stack pop (used by iret and retf), for the purpose of
7568 * retrieving a new stack pointer.
7569 *
7570 * This will raise \#SS or \#PF if appropriate.
7571 *
7572 * @returns Strict VBox status code.
7573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7574 * @param off Offset from the top of the stack. This is zero
7575 * except in the retf case.
7576 * @param cbMem The number of bytes to pop from the stack.
7577 * @param ppvMem Where to return the pointer to the stack memory.
7578 * @param pbUnmapInfo Where to store unmap info for
7579 * iemMemStackPopDoneSpecial.
7580 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7581 * return this because all use of this function is
7582 * to retrieve a new value and anything we return
7583 * here would be discarded.)
7584 */
7585VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7586 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7587{
7588 Assert(cbMem < UINT8_MAX);
7589
7590 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7591 RTGCPTR GCPtrTop;
7592 if (IEM_IS_64BIT_CODE(pVCpu))
7593 GCPtrTop = uCurNewRsp;
7594 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7595 GCPtrTop = (uint32_t)uCurNewRsp;
7596 else
7597 GCPtrTop = (uint16_t)uCurNewRsp;
7598
7599 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7600 0 /* checked in iemMemStackPopBeginSpecial */);
7601}
7602
7603
7604/**
7605 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7606 * iemMemStackPopContinueSpecial).
7607 *
7608 * The caller will manually commit the rSP.
7609 *
7610 * @returns Strict VBox status code.
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param bUnmapInfo Unmap information returned by
7613 * iemMemStackPopBeginSpecial() or
7614 * iemMemStackPopContinueSpecial().
7615 */
7616VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7617{
7618 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7619}
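/*
 * A minimal usage sketch (illustrative only) of the pop begin/done sequence
 * used by iret/retf style code; the names are made up, the example pops a
 * single qword and only commits RSP once everything else has succeeded.
 *
 *      void const *pvFrame;
 *      uint8_t     bUnmapInfo;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvFrame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uValue = *(uint64_t const *)pvFrame;
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp; // the caller commits RSP manually, using uValue as needed
 *      }
 */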
7620
7621
7622/**
7623 * Fetches a system table byte.
7624 *
7625 * @returns Strict VBox status code.
7626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7627 * @param pbDst Where to return the byte.
7628 * @param iSegReg The index of the segment register to use for
7629 * this access. The base and limits are checked.
7630 * @param GCPtrMem The address of the guest memory.
7631 */
7632VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7633{
7634 /* The lazy approach for now... */
7635 uint8_t bUnmapInfo;
7636 uint8_t const *pbSrc;
7637 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7638 if (rc == VINF_SUCCESS)
7639 {
7640 *pbDst = *pbSrc;
7641 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7642 }
7643 return rc;
7644}
7645
7646
7647/**
7648 * Fetches a system table word.
7649 *
7650 * @returns Strict VBox status code.
7651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7652 * @param pu16Dst Where to return the word.
7653 * @param iSegReg The index of the segment register to use for
7654 * this access. The base and limits are checked.
7655 * @param GCPtrMem The address of the guest memory.
7656 */
7657VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7658{
7659 /* The lazy approach for now... */
7660 uint8_t bUnmapInfo;
7661 uint16_t const *pu16Src;
7662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7663 if (rc == VINF_SUCCESS)
7664 {
7665 *pu16Dst = *pu16Src;
7666 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7667 }
7668 return rc;
7669}
7670
7671
7672/**
7673 * Fetches a system table dword.
7674 *
7675 * @returns Strict VBox status code.
7676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7677 * @param pu32Dst Where to return the dword.
7678 * @param iSegReg The index of the segment register to use for
7679 * this access. The base and limits are checked.
7680 * @param GCPtrMem The address of the guest memory.
7681 */
7682VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7683{
7684 /* The lazy approach for now... */
7685 uint8_t bUnmapInfo;
7686 uint32_t const *pu32Src;
7687 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7688 if (rc == VINF_SUCCESS)
7689 {
7690 *pu32Dst = *pu32Src;
7691 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7692 }
7693 return rc;
7694}
7695
7696
7697/**
7698 * Fetches a system table qword.
7699 *
7700 * @returns Strict VBox status code.
7701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7702 * @param pu64Dst Where to return the qword.
7703 * @param iSegReg The index of the segment register to use for
7704 * this access. The base and limits are checked.
7705 * @param GCPtrMem The address of the guest memory.
7706 */
7707VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7708{
7709 /* The lazy approach for now... */
7710 uint8_t bUnmapInfo;
7711 uint64_t const *pu64Src;
7712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7713 if (rc == VINF_SUCCESS)
7714 {
7715 *pu64Dst = *pu64Src;
7716 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7717 }
7718 return rc;
7719}
7720
7721
7722/**
7723 * Fetches a descriptor table entry with caller specified error code.
7724 *
7725 * @returns Strict VBox status code.
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 * @param pDesc Where to return the descriptor table entry.
7728 * @param uSel The selector which table entry to fetch.
7729 * @param uXcpt The exception to raise on table lookup error.
7730 * @param uErrorCode The error code associated with the exception.
7731 */
7732static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7733 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7734{
7735 AssertPtr(pDesc);
7736 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7737
7738 /** @todo did the 286 require all 8 bytes to be accessible? */
7739 /*
7740 * Get the selector table base and check bounds.
7741 */
7742 RTGCPTR GCPtrBase;
7743 if (uSel & X86_SEL_LDT)
7744 {
7745 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7746 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7747 {
7748 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7749 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7750 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7751 uErrorCode, 0);
7752 }
7753
7754 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7755 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7756 }
7757 else
7758 {
7759 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7760 {
7761 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7762 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7763 uErrorCode, 0);
7764 }
7765 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7766 }
7767
7768 /*
7769 * Read the legacy descriptor and maybe the long mode extensions if
7770 * required.
7771 */
7772 VBOXSTRICTRC rcStrict;
7773 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7774 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7775 else
7776 {
7777 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7778 if (rcStrict == VINF_SUCCESS)
7779 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7780 if (rcStrict == VINF_SUCCESS)
7781 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7782 if (rcStrict == VINF_SUCCESS)
7783 pDesc->Legacy.au16[3] = 0;
7784 else
7785 return rcStrict;
7786 }
7787
7788 if (rcStrict == VINF_SUCCESS)
7789 {
7790 if ( !IEM_IS_LONG_MODE(pVCpu)
7791 || pDesc->Legacy.Gen.u1DescType)
7792 pDesc->Long.au64[1] = 0;
7793 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7794 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7795 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7796 else
7797 {
7798 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7799 /** @todo is this the right exception? */
7800 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7801 }
7802 }
7803 return rcStrict;
7804}
7805
7806
7807/**
7808 * Fetches a descriptor table entry.
7809 *
7810 * @returns Strict VBox status code.
7811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7812 * @param pDesc Where to return the descriptor table entry.
7813 * @param uSel The selector which table entry to fetch.
7814 * @param uXcpt The exception to raise on table lookup error.
7815 */
7816VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7817{
7818 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7819}
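/*
 * A simplified sketch (illustrative only) of how these helpers are typically
 * combined when loading a segment register; uSel and the error handling are
 * assumed to come from the surrounding instruction emulation.
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... validate type, present bit and DPL in Desc.Legacy.Gen here ...
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *      }
 */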
7820
7821
7822/**
7823 * Marks the selector descriptor as accessed (only non-system descriptors).
7824 *
7825 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7826 * will therefore skip the limit checks.
7827 *
7828 * @returns Strict VBox status code.
7829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7830 * @param uSel The selector.
7831 */
7832VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7833{
7834 /*
7835 * Get the selector table base and calculate the entry address.
7836 */
7837 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7838 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7839 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7840 GCPtr += uSel & X86_SEL_MASK;
7841
7842 /*
7843 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7844 * ugly stuff to avoid this. This will make sure it's an atomic access
7845 * as well as more or less remove any question about 8-bit or 32-bit accesses.
7846 */
7847 VBOXSTRICTRC rcStrict;
7848 uint8_t bUnmapInfo;
7849 uint32_t volatile *pu32;
7850 if ((GCPtr & 3) == 0)
7851 {
7852 /* The normal case, map the 32 bits surrounding the accessed bit (bit 40). */
7853 GCPtr += 2 + 2;
7854 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7855 if (rcStrict != VINF_SUCCESS)
7856 return rcStrict;
7857 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
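 /* (The accessed flag is bit 40 of the 8-byte descriptor; having skipped the
 first four bytes above, that is bit 40 - 32 = 8 of the mapped dword.) */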
7858 }
7859 else
7860 {
7861 /* The misaligned GDT/LDT case, map the whole thing. */
7862 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7863 if (rcStrict != VINF_SUCCESS)
7864 return rcStrict;
7865 switch ((uintptr_t)pu32 & 3)
7866 {
7867 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7868 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7869 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7870 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7871 }
7872 }
7873
7874 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7875}
7876
7877
7878#undef LOG_GROUP
7879#define LOG_GROUP LOG_GROUP_IEM
7880
7881/** @} */
7882
7883/** @name Opcode Helpers.
7884 * @{
7885 */
7886
7887/**
7888 * Calculates the effective address of a ModR/M memory operand.
7889 *
7890 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7891 *
7892 * @return Strict VBox status code.
7893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7894 * @param bRm The ModRM byte.
7895 * @param cbImmAndRspOffset - First byte: The size of any immediate
7896 * following the effective address opcode bytes
7897 * (only for RIP relative addressing).
7898 * - Second byte: RSP displacement (for POP [ESP]).
7899 * @param pGCPtrEff Where to return the effective address.
7900 */
7901VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
7902{
7903 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7904# define SET_SS_DEF() \
7905 do \
7906 { \
7907 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7908 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
7909 } while (0)
7910
7911 if (!IEM_IS_64BIT_CODE(pVCpu))
7912 {
7913/** @todo Check the effective address size crap! */
7914 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
7915 {
7916 uint16_t u16EffAddr;
7917
7918 /* Handle the disp16 form with no registers first. */
7919 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7920 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7921 else
7922 {
7923 /* Get the displacement. */
7924 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7925 {
7926 case 0: u16EffAddr = 0; break;
7927 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7928 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7929 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
7930 }
7931
7932 /* Add the base and index registers to the disp. */
7933 switch (bRm & X86_MODRM_RM_MASK)
7934 {
7935 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
7936 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
7937 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
7938 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
7939 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
7940 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
7941 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
7942 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
7943 }
7944 }
7945
7946 *pGCPtrEff = u16EffAddr;
7947 }
7948 else
7949 {
7950 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
7951 uint32_t u32EffAddr;
7952
7953 /* Handle the disp32 form with no registers first. */
7954 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7955 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7956 else
7957 {
7958 /* Get the register (or SIB) value. */
7959 switch ((bRm & X86_MODRM_RM_MASK))
7960 {
7961 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
7962 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
7963 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
7964 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
7965 case 4: /* SIB */
7966 {
7967 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7968
7969 /* Get the index and scale it. */
7970 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7971 {
7972 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
7973 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
7974 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
7975 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
7976 case 4: u32EffAddr = 0; /*none */ break;
7977 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
7978 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
7979 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
7980 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7981 }
7982 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7983
7984 /* add base */
7985 switch (bSib & X86_SIB_BASE_MASK)
7986 {
7987 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
7988 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
7989 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
7990 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
7991 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
7992 case 5:
7993 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7994 {
7995 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
7996 SET_SS_DEF();
7997 }
7998 else
7999 {
8000 uint32_t u32Disp;
8001 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8002 u32EffAddr += u32Disp;
8003 }
8004 break;
8005 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8006 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8007 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8008 }
8009 break;
8010 }
8011 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8012 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8013 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8014 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8015 }
8016
8017 /* Get and add the displacement. */
8018 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8019 {
8020 case 0:
8021 break;
8022 case 1:
8023 {
8024 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8025 u32EffAddr += i8Disp;
8026 break;
8027 }
8028 case 2:
8029 {
8030 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8031 u32EffAddr += u32Disp;
8032 break;
8033 }
8034 default:
8035 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8036 }
8037
8038 }
8039 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8040 *pGCPtrEff = u32EffAddr;
8041 }
8042 }
8043 else
8044 {
8045 uint64_t u64EffAddr;
8046
8047 /* Handle the rip+disp32 form with no registers first. */
8048 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8049 {
8050 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8051 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8052 }
8053 else
8054 {
8055 /* Get the register (or SIB) value. */
8056 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8057 {
8058 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8059 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8060 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8061 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8062 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8063 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8064 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8065 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8066 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8067 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8068 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8069 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8070 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8071 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8072 /* SIB */
8073 case 4:
8074 case 12:
8075 {
8076 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8077
8078 /* Get the index and scale it. */
8079 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8080 {
8081 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8082 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8083 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8084 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8085 case 4: u64EffAddr = 0; /*none */ break;
8086 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8087 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8088 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8089 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8090 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8091 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8092 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8093 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8094 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8095 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8096 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8097 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8098 }
8099 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8100
8101 /* add base */
8102 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8103 {
8104 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8105 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8106 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8107 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8108 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8109 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8110 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8111 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8112 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8113 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8114 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8115 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8116 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8117 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8118 /* complicated encodings */
8119 case 5:
8120 case 13:
8121 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8122 {
8123 if (!pVCpu->iem.s.uRexB)
8124 {
8125 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8126 SET_SS_DEF();
8127 }
8128 else
8129 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8130 }
8131 else
8132 {
8133 uint32_t u32Disp;
8134 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8135 u64EffAddr += (int32_t)u32Disp;
8136 }
8137 break;
8138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8139 }
8140 break;
8141 }
8142 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8143 }
8144
8145 /* Get and add the displacement. */
8146 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8147 {
8148 case 0:
8149 break;
8150 case 1:
8151 {
8152 int8_t i8Disp;
8153 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8154 u64EffAddr += i8Disp;
8155 break;
8156 }
8157 case 2:
8158 {
8159 uint32_t u32Disp;
8160 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8161 u64EffAddr += (int32_t)u32Disp;
8162 break;
8163 }
8164 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8165 }
8166
8167 }
8168
8169 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8170 *pGCPtrEff = u64EffAddr;
8171 else
8172 {
8173 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8174 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8175 }
8176 }
8177
8178 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8179 return VINF_SUCCESS;
8180}
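/*
 * A small sketch (illustrative only) of how a caller is expected to pack the
 * cbImmAndRspOffset parameter described above; cbImm and cbPopAdjust are
 * made-up names for the example.
 *
 *      uint32_t const cbImmAndRspOffset = (uint32_t)cbImm               // bits 7:0  - immediate size (RIP relative only)
 *                                       | ((uint32_t)cbPopAdjust << 8); // bits 15:8 - extra RSP displacement (POP [ESP])
 *      RTGCPTR      GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff);
 */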
8181
8182
8183#ifdef IEM_WITH_SETJMP
8184/**
8185 * Calculates the effective address of a ModR/M memory operand.
8186 *
8187 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8188 *
8189 * May longjmp on internal error.
8190 *
8191 * @return The effective address.
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param bRm The ModRM byte.
8194 * @param cbImmAndRspOffset - First byte: The size of any immediate
8195 * following the effective address opcode bytes
8196 * (only for RIP relative addressing).
8197 * - Second byte: RSP displacement (for POP [ESP]).
8198 */
8199RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8200{
8201 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8202# define SET_SS_DEF() \
8203 do \
8204 { \
8205 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8206 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8207 } while (0)
8208
8209 if (!IEM_IS_64BIT_CODE(pVCpu))
8210 {
8211/** @todo Check the effective address size crap! */
8212 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8213 {
8214 uint16_t u16EffAddr;
8215
8216 /* Handle the disp16 form with no registers first. */
8217 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8218 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8219 else
8220 {
8221 /* Get the displacement. */
8222 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8223 {
8224 case 0: u16EffAddr = 0; break;
8225 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8226 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8227 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8228 }
8229
8230 /* Add the base and index registers to the disp. */
8231 switch (bRm & X86_MODRM_RM_MASK)
8232 {
8233 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8234 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8235 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8236 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8237 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8238 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8239 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8240 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8241 }
8242 }
8243
8244 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8245 return u16EffAddr;
8246 }
8247
8248 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8249 uint32_t u32EffAddr;
8250
8251 /* Handle the disp32 form with no registers first. */
8252 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8253 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8254 else
8255 {
8256 /* Get the register (or SIB) value. */
8257 switch ((bRm & X86_MODRM_RM_MASK))
8258 {
8259 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8260 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8261 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8262 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8263 case 4: /* SIB */
8264 {
8265 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8266
8267 /* Get the index and scale it. */
8268 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8269 {
8270 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8271 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8272 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8273 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8274 case 4: u32EffAddr = 0; /*none */ break;
8275 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8276 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8277 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8278 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8279 }
8280 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8281
8282 /* add base */
8283 switch (bSib & X86_SIB_BASE_MASK)
8284 {
8285 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8286 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8287 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8288 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8289 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8290 case 5:
8291 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8292 {
8293 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8294 SET_SS_DEF();
8295 }
8296 else
8297 {
8298 uint32_t u32Disp;
8299 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8300 u32EffAddr += u32Disp;
8301 }
8302 break;
8303 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8304 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8305 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8306 }
8307 break;
8308 }
8309 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8310 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8311 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8312 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8313 }
8314
8315 /* Get and add the displacement. */
8316 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8317 {
8318 case 0:
8319 break;
8320 case 1:
8321 {
8322 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8323 u32EffAddr += i8Disp;
8324 break;
8325 }
8326 case 2:
8327 {
8328 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8329 u32EffAddr += u32Disp;
8330 break;
8331 }
8332 default:
8333 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8334 }
8335 }
8336
8337 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8338 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8339 return u32EffAddr;
8340 }
8341
8342 uint64_t u64EffAddr;
8343
8344 /* Handle the rip+disp32 form with no registers first. */
8345 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8346 {
8347 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8348 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8349 }
8350 else
8351 {
8352 /* Get the register (or SIB) value. */
8353 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8354 {
8355 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8356 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8357 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8358 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8359 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8360 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8361 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8362 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8363 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8364 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8365 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8366 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8367 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8368 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8369 /* SIB */
8370 case 4:
8371 case 12:
8372 {
8373 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8374
8375 /* Get the index and scale it. */
8376 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8377 {
8378 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8379 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8380 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8381 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8382 case 4: u64EffAddr = 0; /*none */ break;
8383 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8384 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8385 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8386 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8387 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8388 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8389 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8390 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8391 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8392 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8393 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8394 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8395 }
8396 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8397
8398 /* add base */
8399 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8400 {
8401 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8402 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8403 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8404 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8405 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8406 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8407 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8408 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8409 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8410 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8411 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8412 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8413 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8414 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8415 /* complicated encodings */
8416 case 5:
8417 case 13:
8418 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8419 {
8420 if (!pVCpu->iem.s.uRexB)
8421 {
8422 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8423 SET_SS_DEF();
8424 }
8425 else
8426 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8427 }
8428 else
8429 {
8430 uint32_t u32Disp;
8431 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8432 u64EffAddr += (int32_t)u32Disp;
8433 }
8434 break;
8435 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8436 }
8437 break;
8438 }
8439 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8440 }
8441
8442 /* Get and add the displacement. */
8443 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8444 {
8445 case 0:
8446 break;
8447 case 1:
8448 {
8449 int8_t i8Disp;
8450 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8451 u64EffAddr += i8Disp;
8452 break;
8453 }
8454 case 2:
8455 {
8456 uint32_t u32Disp;
8457 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8458 u64EffAddr += (int32_t)u32Disp;
8459 break;
8460 }
8461 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8462 }
8463
8464 }
8465
8466 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8467 {
8468 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8469 return u64EffAddr;
8470 }
8471 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8472 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8473 return u64EffAddr & UINT32_MAX;
8474}
8475#endif /* IEM_WITH_SETJMP */
8476
8477
8478/**
8479 * Calculates the effective address of a ModR/M memory operand, extended version
8480 * for use in the recompilers.
8481 *
8482 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8483 *
8484 * @return Strict VBox status code.
8485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8486 * @param bRm The ModRM byte.
8487 * @param cbImmAndRspOffset - First byte: The size of any immediate
8488 * following the effective address opcode bytes
8489 * (only for RIP relative addressing).
8490 * - Second byte: RSP displacement (for POP [ESP]).
8491 * @param pGCPtrEff Where to return the effective address.
8492 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8493 * SIB byte (bits 39:32).
8494 */
8495VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8496{
8497 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8498# define SET_SS_DEF() \
8499 do \
8500 { \
8501 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8502 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8503 } while (0)
8504
8505 uint64_t uInfo;
8506 if (!IEM_IS_64BIT_CODE(pVCpu))
8507 {
8508/** @todo Check the effective address size crap! */
8509 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8510 {
8511 uint16_t u16EffAddr;
8512
8513 /* Handle the disp16 form with no registers first. */
8514 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8515 {
8516 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8517 uInfo = u16EffAddr;
8518 }
8519 else
8520 {
8521 /* Get the displacement. */
8522 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8523 {
8524 case 0: u16EffAddr = 0; break;
8525 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8526 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8527 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8528 }
8529 uInfo = u16EffAddr;
8530
8531 /* Add the base and index registers to the disp. */
8532 switch (bRm & X86_MODRM_RM_MASK)
8533 {
8534 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8535 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8536 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8537 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8538 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8539 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8540 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8541 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8542 }
8543 }
8544
8545 *pGCPtrEff = u16EffAddr;
8546 }
8547 else
8548 {
8549 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8550 uint32_t u32EffAddr;
8551
8552 /* Handle the disp32 form with no registers first. */
8553 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8554 {
8555 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8556 uInfo = u32EffAddr;
8557 }
8558 else
8559 {
8560 /* Get the register (or SIB) value. */
8561 uInfo = 0;
8562 switch ((bRm & X86_MODRM_RM_MASK))
8563 {
8564 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8565 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8566 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8567 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8568 case 4: /* SIB */
8569 {
8570 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8571 uInfo = (uint64_t)bSib << 32;
8572
8573 /* Get the index and scale it. */
8574 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8575 {
8576 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8577 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8578 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8579 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8580 case 4: u32EffAddr = 0; /*none */ break;
8581 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8582 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8583 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8584 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8585 }
8586 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8587
8588 /* add base */
8589 switch (bSib & X86_SIB_BASE_MASK)
8590 {
8591 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8592 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8593 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8594 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8595 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8596 case 5:
8597 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8598 {
8599 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8600 SET_SS_DEF();
8601 }
8602 else
8603 {
8604 uint32_t u32Disp;
8605 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8606 u32EffAddr += u32Disp;
8607 uInfo |= u32Disp;
8608 }
8609 break;
8610 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8611 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8612 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8613 }
8614 break;
8615 }
8616 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8617 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8618 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8619 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8620 }
8621
8622 /* Get and add the displacement. */
8623 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8624 {
8625 case 0:
8626 break;
8627 case 1:
8628 {
8629 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8630 u32EffAddr += i8Disp;
8631 uInfo |= (uint32_t)(int32_t)i8Disp;
8632 break;
8633 }
8634 case 2:
8635 {
8636 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8637 u32EffAddr += u32Disp;
8638 uInfo |= (uint32_t)u32Disp;
8639 break;
8640 }
8641 default:
8642 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8643 }
8644
8645 }
8646 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8647 *pGCPtrEff = u32EffAddr;
8648 }
8649 }
8650 else
8651 {
8652 uint64_t u64EffAddr;
8653
8654 /* Handle the rip+disp32 form with no registers first. */
8655 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8656 {
8657 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8658 uInfo = (uint32_t)u64EffAddr;
8659 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8660 }
8661 else
8662 {
8663 /* Get the register (or SIB) value. */
8664 uInfo = 0;
8665 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8666 {
8667 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8668 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8669 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8670 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8671 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8672 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8673 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8674 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8675 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8676 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8677 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8678 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8679 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8680 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8681 /* SIB */
8682 case 4:
8683 case 12:
8684 {
8685 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8686 uInfo = (uint64_t)bSib << 32;
8687
8688 /* Get the index and scale it. */
8689 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8690 {
8691 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8692 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8693 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8694 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8695 case 4: u64EffAddr = 0; /*none */ break;
8696 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8697 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8698 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8699 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8700 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8701 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8702 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8703 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8704 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8705 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8706 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8707 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8708 }
8709 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8710
8711 /* add base */
8712 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8713 {
8714 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8715 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8716 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8717 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8718 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8719 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8720 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8721 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8722 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8723 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8724 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8725 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8726 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8727 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8728 /* complicated encodings */
8729 case 5:
8730 case 13:
8731 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8732 {
8733 if (!pVCpu->iem.s.uRexB)
8734 {
8735 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8736 SET_SS_DEF();
8737 }
8738 else
8739 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8740 }
8741 else
8742 {
8743 uint32_t u32Disp;
8744 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8745 u64EffAddr += (int32_t)u32Disp;
8746 uInfo |= u32Disp;
8747 }
8748 break;
8749 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8750 }
8751 break;
8752 }
8753 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8754 }
8755
8756 /* Get and add the displacement. */
8757 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8758 {
8759 case 0:
8760 break;
8761 case 1:
8762 {
8763 int8_t i8Disp;
8764 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8765 u64EffAddr += i8Disp;
8766 uInfo |= (uint32_t)(int32_t)i8Disp;
8767 break;
8768 }
8769 case 2:
8770 {
8771 uint32_t u32Disp;
8772 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8773 u64EffAddr += (int32_t)u32Disp;
8774 uInfo |= u32Disp;
8775 break;
8776 }
8777 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8778 }
8779
8780 }
8781
8782 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8783 *pGCPtrEff = u64EffAddr;
8784 else
8785 {
8786 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8787 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8788 }
8789 }
8790 *puInfo = uInfo;
8791
8792 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8793 return VINF_SUCCESS;
8794}
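/*
 * A small sketch (illustrative only) of how a recompiler caller might take the
 * extra info apart again after calling iemOpHlpCalcRmEffAddrEx; the variable
 * names are made up for the example.
 *
 *      RTGCPTR      GCPtrEff;
 *      uint64_t     uInfo;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *      uint32_t const u32Disp = (uint32_t)uInfo;        // bits 31:0  - the 32-bit displacement (if any)
 *      uint8_t  const bSib    = (uint8_t)(uInfo >> 32); // bits 39:32 - the SIB byte (if any)
 */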
8795
8796/** @} */
8797
8798
8799#ifdef LOG_ENABLED
8800/**
8801 * Logs the current instruction.
8802 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8803 * @param fSameCtx Set if we have the same context information as the VMM,
8804 * clear if we may have already executed an instruction in
8805 * our debug context. When clear, we assume IEMCPU holds
8806 * valid CPU mode info. Note that this parameter is by now
8807 * somewhat misleading and obsolete.
8808 *
8809 * @param pszFunction The IEM function doing the execution.
8810 */
8811static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8812{
8813# ifdef IN_RING3
8814 if (LogIs2Enabled())
8815 {
8816 char szInstr[256];
8817 uint32_t cbInstr = 0;
8818 if (fSameCtx)
8819 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8820 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8821 szInstr, sizeof(szInstr), &cbInstr);
8822 else
8823 {
8824 uint32_t fFlags = 0;
8825 switch (IEM_GET_CPU_MODE(pVCpu))
8826 {
8827 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8828 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8829 case IEMMODE_16BIT:
8830 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8831 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8832 else
8833 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8834 break;
8835 }
8836 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8837 szInstr, sizeof(szInstr), &cbInstr);
8838 }
8839
8840 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8841 Log2(("**** %s fExec=%x\n"
8842 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8843 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8844 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8845 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8846 " %s\n"
8847 , pszFunction, pVCpu->iem.s.fExec,
8848 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8849 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8850 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8851 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8852 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8853 szInstr));
8854
8855 /* This stuff sucks atm. as it fills the log with MSRs. */
8856 //if (LogIs3Enabled())
8857 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8858 }
8859 else
8860# endif
8861 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8862 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8863 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8864}
8865#endif /* LOG_ENABLED */
8866
8867
8868#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8869/**
8870 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8871 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8872 *
8873 * @returns Modified rcStrict.
8874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8875 * @param rcStrict The instruction execution status.
8876 */
8877static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8878{
8879 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8880 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8881 {
8882 /* VMX preemption timer takes priority over NMI-window exits. */
8883 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8884 {
8885 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
8886 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
8887 }
8888 /*
8889 * Check remaining intercepts.
8890 *
8891 * NMI-window and Interrupt-window VM-exits.
8892 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
8893 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
8894 *
8895 * See Intel spec. 26.7.6 "NMI-Window Exiting".
8896 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8897 */
8898 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
8899 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
8900 && !TRPMHasTrap(pVCpu))
8901 {
8902 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
8903 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
8904 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
8905 {
8906 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
8907 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
8908 }
8909 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
8910 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
8911 {
8912 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
8913 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
8914 }
8915 }
8916 }
8917 /* TPR-below threshold/APIC write has the highest priority. */
8918 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
8919 {
8920 rcStrict = iemVmxApicWriteEmulation(pVCpu);
8921 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
8922 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
8923 }
8924 /* MTF takes priority over VMX-preemption timer. */
8925 else
8926 {
8927 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
8928 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
8929 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
8930 }
8931 return rcStrict;
8932}
8933#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8934
8935
8936/**
8937 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8938 * IEMExecOneWithPrefetchedByPC.
8939 *
8940 * Similar code is found in IEMExecLots.
8941 *
8942 * @return Strict VBox status code.
8943 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8944 * @param fExecuteInhibit If set, execute the instruction following CLI,
8945 * POP SS and MOV SS,GR.
8946 * @param pszFunction The calling function name.
8947 */
8948DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
8949{
8950 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8951 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8952 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8953 RT_NOREF_PV(pszFunction);
8954
8955#ifdef IEM_WITH_SETJMP
8956 VBOXSTRICTRC rcStrict;
8957 IEM_TRY_SETJMP(pVCpu, rcStrict)
8958 {
8959 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8960 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8961 }
8962 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
8963 {
8964 pVCpu->iem.s.cLongJumps++;
8965 }
8966 IEM_CATCH_LONGJMP_END(pVCpu);
8967#else
8968 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8969 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8970#endif
8971 if (rcStrict == VINF_SUCCESS)
8972 pVCpu->iem.s.cInstructions++;
8973 if (pVCpu->iem.s.cActiveMappings > 0)
8974 {
8975 Assert(rcStrict != VINF_SUCCESS);
8976 iemMemRollback(pVCpu);
8977 }
8978 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8979 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8980 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8981
8982//#ifdef DEBUG
8983// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
8984//#endif
8985
8986#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8987 /*
8988 * Perform any VMX nested-guest instruction boundary actions.
8989 *
8990 * If any of these causes a VM-exit, we must skip executing the next
8991 * instruction (would run into stale page tables). A VM-exit makes sure
8992 * there is no interrupt-inhibition, so that should ensure we don't go on
8993 * to try executing the next instruction. Clearing fExecuteInhibit is
8994 * problematic because of the setjmp/longjmp clobbering above.
8995 */
8996 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
8997 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
8998 || rcStrict != VINF_SUCCESS)
8999 { /* likely */ }
9000 else
9001 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9002#endif
9003
9004 /* Execute the next instruction as well if a cli, pop ss or
9005 mov ss, Gr has just completed successfully. */
9006 if ( fExecuteInhibit
9007 && rcStrict == VINF_SUCCESS
9008 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9009 {
9010 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9011 if (rcStrict == VINF_SUCCESS)
9012 {
9013#ifdef LOG_ENABLED
9014 iemLogCurInstr(pVCpu, false, pszFunction);
9015#endif
9016#ifdef IEM_WITH_SETJMP
9017 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9018 {
9019 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9020 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9021 }
9022 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9023 {
9024 pVCpu->iem.s.cLongJumps++;
9025 }
9026 IEM_CATCH_LONGJMP_END(pVCpu);
9027#else
9028 IEM_OPCODE_GET_FIRST_U8(&b);
9029 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9030#endif
9031 if (rcStrict == VINF_SUCCESS)
9032 {
9033 pVCpu->iem.s.cInstructions++;
9034#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9035 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9036 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9037 { /* likely */ }
9038 else
9039 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9040#endif
9041 }
9042 if (pVCpu->iem.s.cActiveMappings > 0)
9043 {
9044 Assert(rcStrict != VINF_SUCCESS);
9045 iemMemRollback(pVCpu);
9046 }
9047 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9048 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9049 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9050 }
9051 else if (pVCpu->iem.s.cActiveMappings > 0)
9052 iemMemRollback(pVCpu);
9053 /** @todo drop this after we bake this change into RIP advancing. */
9054 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9055 }
9056
9057 /*
9058 * Return value fiddling, statistics and sanity assertions.
9059 */
9060 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9061
9062 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9063 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9064 return rcStrict;
9065}
9066
9067
9068/**
9069 * Execute one instruction.
9070 *
9071 * @return Strict VBox status code.
9072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9073 */
9074VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9075{
9076 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9077#ifdef LOG_ENABLED
9078 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9079#endif
9080
9081 /*
9082 * Do the decoding and emulation.
9083 */
9084 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9085 if (rcStrict == VINF_SUCCESS)
9086 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9087 else if (pVCpu->iem.s.cActiveMappings > 0)
9088 iemMemRollback(pVCpu);
9089
9090 if (rcStrict != VINF_SUCCESS)
9091 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9092 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9093 return rcStrict;
9094}
9095
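/*
 * Illustrative sketch of how a caller that owns the EMT and has a fully loaded
 * guest context might drive IEMExecOne and pass any non-VINF_SUCCESS status on.
 * The helper name emR3ExampleInterpretOne is hypothetical and the example
 * assumes this translation unit's includes and an initialized pVCpu.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emR3ExampleInterpretOne(PVMCPUCC pVCpu)
{
    /* Interpret exactly one instruction at the current CS:RIP. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    /* Informational statuses (pending I/O and the like) go back to the caller
       unchanged; only log them here, mirroring what IEMExecOne itself does. */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("emR3ExampleInterpretOne: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif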
9096
9097VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9098{
9099 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9100 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9101 if (rcStrict == VINF_SUCCESS)
9102 {
9103 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9104 if (pcbWritten)
9105 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9106 }
9107 else if (pVCpu->iem.s.cActiveMappings > 0)
9108 iemMemRollback(pVCpu);
9109
9110 return rcStrict;
9111}
9112
9113
9114VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9115 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9116{
9117 VBOXSTRICTRC rcStrict;
9118 if ( cbOpcodeBytes
9119 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9120 {
9121 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9122#ifdef IEM_WITH_CODE_TLB
9123 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9124 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9125 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9126 pVCpu->iem.s.offCurInstrStart = 0;
9127 pVCpu->iem.s.offInstrNextByte = 0;
9128 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9129#else
9130 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9131 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9132#endif
9133 rcStrict = VINF_SUCCESS;
9134 }
9135 else
9136 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9137 if (rcStrict == VINF_SUCCESS)
9138 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9139 else if (pVCpu->iem.s.cActiveMappings > 0)
9140 iemMemRollback(pVCpu);
9141
9142 return rcStrict;
9143}
9144
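/*
 * Illustrative sketch of feeding opcode bytes the caller has already fetched
 * (e.g. from exit information) to IEMExecOneWithPrefetchedByPC so IEM can skip
 * the opcode prefetch.  The helper name and buffer are hypothetical; as the
 * code above shows, the API falls back to a normal prefetch if the bytes do
 * not correspond to the current RIP.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emR3ExampleRunPrefetched(PVMCPUCC pVCpu, uint64_t GCPtrInstr,
                                             const uint8_t *pabBytes, size_t cbBytes)
{
    /* GCPtrInstr must equal the guest RIP for the prefetched bytes to be used. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, pabBytes, cbBytes);
}
#endif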
9145
9146VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9147{
9148 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9149 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9150 if (rcStrict == VINF_SUCCESS)
9151 {
9152 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9153 if (pcbWritten)
9154 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9155 }
9156 else if (pVCpu->iem.s.cActiveMappings > 0)
9157 iemMemRollback(pVCpu);
9158
9159 return rcStrict;
9160}
9161
9162
9163VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9164 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9165{
9166 VBOXSTRICTRC rcStrict;
9167 if ( cbOpcodeBytes
9168 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9169 {
9170 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9171#ifdef IEM_WITH_CODE_TLB
9172 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9173 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9174 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9175 pVCpu->iem.s.offCurInstrStart = 0;
9176 pVCpu->iem.s.offInstrNextByte = 0;
9177 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9178#else
9179 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9180 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9181#endif
9182 rcStrict = VINF_SUCCESS;
9183 }
9184 else
9185 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9186 if (rcStrict == VINF_SUCCESS)
9187 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9188 else if (pVCpu->iem.s.cActiveMappings > 0)
9189 iemMemRollback(pVCpu);
9190
9191 return rcStrict;
9192}
9193
9194
9195/**
9196 * For handling split cacheline lock operations when the host has split-lock
9197 * detection enabled.
9198 *
9199 * This will cause the interpreter to disregard the lock prefix and implicit
9200 * locking (xchg).
9201 *
9202 * @returns Strict VBox status code.
9203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9204 */
9205VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9206{
9207 /*
9208 * Do the decoding and emulation.
9209 */
9210 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9211 if (rcStrict == VINF_SUCCESS)
9212 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9213 else if (pVCpu->iem.s.cActiveMappings > 0)
9214 iemMemRollback(pVCpu);
9215
9216 if (rcStrict != VINF_SUCCESS)
9217 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9218 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9219 return rcStrict;
9220}
9221
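/*
 * Illustrative sketch of a fallback path an exit handler might take once it
 * has determined that a LOCKed guest access straddles a cache line on a host
 * with split-lock detection enabled (how that is detected is outside this
 * sketch).  The helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emR3ExampleHandleSplitLock(PVMCPUCC pVCpu)
{
    /* Re-run the instruction with the lock prefix / implicit locking disregarded. */
    return IEMExecOneIgnoreLock(pVCpu);
}
#endif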
9222
9223/**
9224 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9225 * inject a pending TRPM trap.
9226 */
9227VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9228{
9229 Assert(TRPMHasTrap(pVCpu));
9230
9231 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9232 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9233 {
9234 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9235#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9236 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9237 if (fIntrEnabled)
9238 {
9239 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9240 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9241 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9242 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9243 else
9244 {
9245 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9246 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9247 }
9248 }
9249#else
9250 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9251#endif
9252 if (fIntrEnabled)
9253 {
9254 uint8_t u8TrapNo;
9255 TRPMEVENT enmType;
9256 uint32_t uErrCode;
9257 RTGCPTR uCr2;
9258 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9259 AssertRC(rc2);
9260 Assert(enmType == TRPM_HARDWARE_INT);
9261 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9262
9263 TRPMResetTrap(pVCpu);
9264
9265#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9266 /* Injecting an event may cause a VM-exit. */
9267 if ( rcStrict != VINF_SUCCESS
9268 && rcStrict != VINF_IEM_RAISED_XCPT)
9269 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9270#else
9271 NOREF(rcStrict);
9272#endif
9273 }
9274 }
9275
9276 return VINF_SUCCESS;
9277}
9278
9279
9280VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9281{
9282 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9283 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9284 Assert(cMaxInstructions > 0);
9285
9286 /*
9287 * See if there is an interrupt pending in TRPM, inject it if we can.
9288 */
9289 /** @todo What if we are injecting an exception and not an interrupt? Is that
9290 * possible here? For now we assert it is indeed only an interrupt. */
9291 if (!TRPMHasTrap(pVCpu))
9292 { /* likely */ }
9293 else
9294 {
9295 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9296 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9297 { /*likely */ }
9298 else
9299 return rcStrict;
9300 }
9301
9302 /*
9303 * Initial decoder init w/ prefetch, then setup setjmp.
9304 */
9305 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9306 if (rcStrict == VINF_SUCCESS)
9307 {
9308#ifdef IEM_WITH_SETJMP
9309 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9310 IEM_TRY_SETJMP(pVCpu, rcStrict)
9311#endif
9312 {
9313 /*
9314 * The run loop. We limit ourselves to the caller's instruction budget (cMaxInstructions).
9315 */
9316 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9318 for (;;)
9319 {
9320 /*
9321 * Log the state.
9322 */
9323#ifdef LOG_ENABLED
9324 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9325#endif
9326
9327 /*
9328 * Do the decoding and emulation.
9329 */
9330 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9331 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9332#ifdef VBOX_STRICT
9333 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9334#endif
9335 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9336 {
9337 Assert(pVCpu->iem.s.cActiveMappings == 0);
9338 pVCpu->iem.s.cInstructions++;
9339
9340#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9341 /* Perform any VMX nested-guest instruction boundary actions. */
9342 uint64_t fCpu = pVCpu->fLocalForcedActions;
9343 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9344 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9345 { /* likely */ }
9346 else
9347 {
9348 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9349 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9350 fCpu = pVCpu->fLocalForcedActions;
9351 else
9352 {
9353 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9354 break;
9355 }
9356 }
9357#endif
9358 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9359 {
9360#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9361 uint64_t fCpu = pVCpu->fLocalForcedActions;
9362#endif
9363 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9364 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9365 | VMCPU_FF_TLB_FLUSH
9366 | VMCPU_FF_UNHALT );
9367
9368 if (RT_LIKELY( ( !fCpu
9369 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9370 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9371 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9372 {
9373 if (--cMaxInstructionsGccStupidity > 0)
9374 {
9375 /* Poll timers every now and then according to the caller's specs. */
9376 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9377 || !TMTimerPollBool(pVM, pVCpu))
9378 {
9379 Assert(pVCpu->iem.s.cActiveMappings == 0);
9380 iemReInitDecoder(pVCpu);
9381 continue;
9382 }
9383 }
9384 }
9385 }
9386 Assert(pVCpu->iem.s.cActiveMappings == 0);
9387 }
9388 else if (pVCpu->iem.s.cActiveMappings > 0)
9389 iemMemRollback(pVCpu);
9390 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9391 break;
9392 }
9393 }
9394#ifdef IEM_WITH_SETJMP
9395 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9396 {
9397 if (pVCpu->iem.s.cActiveMappings > 0)
9398 iemMemRollback(pVCpu);
9399# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9400 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9401# endif
9402 pVCpu->iem.s.cLongJumps++;
9403 }
9404 IEM_CATCH_LONGJMP_END(pVCpu);
9405#endif
9406
9407 /*
9408 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9409 */
9410 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9412 }
9413 else
9414 {
9415 if (pVCpu->iem.s.cActiveMappings > 0)
9416 iemMemRollback(pVCpu);
9417
9418#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9419 /*
9420 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9421 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9422 */
9423 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9424#endif
9425 }
9426
9427 /*
9428 * Maybe re-enter raw-mode and log.
9429 */
9430 if (rcStrict != VINF_SUCCESS)
9431 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9432 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9433 if (pcInstructions)
9434 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9435 return rcStrict;
9436}
9437
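/*
 * Illustrative sketch of a typical IEMExecLots invocation.  Note that cPollRate
 * is used as a mask and must therefore be a power of two minus one (the
 * assertion at the top of IEMExecLots enforces this).  The instruction budget
 * and poll values are made up for the example; the helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emR3ExampleRunLots(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Execute up to 1024 instructions, polling timers roughly every 32 of them. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 /*cMaxInstructions*/, 31 /*cPollRate*/, &cInstructions);
    LogFlow(("emR3ExampleRunLots: executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif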
9438
9439/**
9440 * Interface used by EMExecuteExec; gathers exit statistics and enforces execution limits.
9441 *
9442 * @returns Strict VBox status code.
9443 * @param pVCpu The cross context virtual CPU structure.
9444 * @param fWillExit To be defined.
9445 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9446 * @param cMaxInstructions Maximum number of instructions to execute.
9447 * @param cMaxInstructionsWithoutExits
9448 * The max number of instructions without exits.
9449 * @param pStats Where to return statistics.
9450 */
9451VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9452 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9453{
9454 NOREF(fWillExit); /** @todo define flexible exit crits */
9455
9456 /*
9457 * Initialize return stats.
9458 */
9459 pStats->cInstructions = 0;
9460 pStats->cExits = 0;
9461 pStats->cMaxExitDistance = 0;
9462 pStats->cReserved = 0;
9463
9464 /*
9465 * Initial decoder init w/ prefetch, then setup setjmp.
9466 */
9467 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9468 if (rcStrict == VINF_SUCCESS)
9469 {
9470#ifdef IEM_WITH_SETJMP
9471 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9472 IEM_TRY_SETJMP(pVCpu, rcStrict)
9473#endif
9474 {
9475#ifdef IN_RING0
9476 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9477#endif
9478 uint32_t cInstructionSinceLastExit = 0;
9479
9480 /*
9481 * The run loop. We limit ourselves to the caller's instruction budget (cMaxInstructions).
9482 */
9483 PVM pVM = pVCpu->CTX_SUFF(pVM);
9484 for (;;)
9485 {
9486 /*
9487 * Log the state.
9488 */
9489#ifdef LOG_ENABLED
9490 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9491#endif
9492
9493 /*
9494 * Do the decoding and emulation.
9495 */
9496 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9497
9498 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9499 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9500
9501 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9502 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9503 {
9504 pStats->cExits += 1;
9505 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9506 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9507 cInstructionSinceLastExit = 0;
9508 }
9509
9510 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9511 {
9512 Assert(pVCpu->iem.s.cActiveMappings == 0);
9513 pVCpu->iem.s.cInstructions++;
9514 pStats->cInstructions++;
9515 cInstructionSinceLastExit++;
9516
9517#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9518 /* Perform any VMX nested-guest instruction boundary actions. */
9519 uint64_t fCpu = pVCpu->fLocalForcedActions;
9520 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9521 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9522 { /* likely */ }
9523 else
9524 {
9525 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9526 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9527 fCpu = pVCpu->fLocalForcedActions;
9528 else
9529 {
9530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9531 break;
9532 }
9533 }
9534#endif
9535 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9536 {
9537#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9538 uint64_t fCpu = pVCpu->fLocalForcedActions;
9539#endif
9540 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9541 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9542 | VMCPU_FF_TLB_FLUSH
9543 | VMCPU_FF_UNHALT );
9544 if (RT_LIKELY( ( ( !fCpu
9545 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9546 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9547 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9548 || pStats->cInstructions < cMinInstructions))
9549 {
9550 if (pStats->cInstructions < cMaxInstructions)
9551 {
9552 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9553 {
9554#ifdef IN_RING0
9555 if ( !fCheckPreemptionPending
9556 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9557#endif
9558 {
9559 Assert(pVCpu->iem.s.cActiveMappings == 0);
9560 iemReInitDecoder(pVCpu);
9561 continue;
9562 }
9563#ifdef IN_RING0
9564 rcStrict = VINF_EM_RAW_INTERRUPT;
9565 break;
9566#endif
9567 }
9568 }
9569 }
9570 Assert(!(fCpu & VMCPU_FF_IEM));
9571 }
9572 Assert(pVCpu->iem.s.cActiveMappings == 0);
9573 }
9574 else if (pVCpu->iem.s.cActiveMappings > 0)
9575 iemMemRollback(pVCpu);
9576 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9577 break;
9578 }
9579 }
9580#ifdef IEM_WITH_SETJMP
9581 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9582 {
9583 if (pVCpu->iem.s.cActiveMappings > 0)
9584 iemMemRollback(pVCpu);
9585 pVCpu->iem.s.cLongJumps++;
9586 }
9587 IEM_CATCH_LONGJMP_END(pVCpu);
9588#endif
9589
9590 /*
9591 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9592 */
9593 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9595 }
9596 else
9597 {
9598 if (pVCpu->iem.s.cActiveMappings > 0)
9599 iemMemRollback(pVCpu);
9600
9601#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9602 /*
9603 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9604 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9605 */
9606 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9607#endif
9608 }
9609
9610 /*
9611 * Maybe re-enter raw-mode and log.
9612 */
9613 if (rcStrict != VINF_SUCCESS)
9614 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9615 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9616 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9617 return rcStrict;
9618}
9619
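/*
 * Illustrative sketch of how EM-style code could call IEMExecForExits and
 * inspect the returned statistics.  The limit values are arbitrary and the
 * helper name is hypothetical; fWillExit is passed as zero since flexible exit
 * criteria are not defined yet (see the @todo above).
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emR3ExampleRunForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("emR3ExampleRunForExits: rc=%Rrc ins=%u exits=%u maxdist=%u\n", VBOXSTRICTRC_VAL(rcStrict),
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif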
9620
9621/**
9622 * Injects a trap, fault, abort, software interrupt or external interrupt.
9623 *
9624 * The parameter list matches TRPMQueryTrapAll pretty closely.
9625 *
9626 * @returns Strict VBox status code.
9627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9628 * @param u8TrapNo The trap number.
9629 * @param enmType What type is it (trap/fault/abort), software
9630 * interrupt or hardware interrupt.
9631 * @param uErrCode The error code if applicable.
9632 * @param uCr2 The CR2 value if applicable.
9633 * @param cbInstr The instruction length (only relevant for
9634 * software interrupts).
9635 */
9636VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9637 uint8_t cbInstr)
9638{
9639 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9640#ifdef DBGFTRACE_ENABLED
9641 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9642 u8TrapNo, enmType, uErrCode, uCr2);
9643#endif
9644
9645 uint32_t fFlags;
9646 switch (enmType)
9647 {
9648 case TRPM_HARDWARE_INT:
9649 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9650 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9651 uErrCode = uCr2 = 0;
9652 break;
9653
9654 case TRPM_SOFTWARE_INT:
9655 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9656 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9657 uErrCode = uCr2 = 0;
9658 break;
9659
9660 case TRPM_TRAP:
9661 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9662 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9663 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9664 if (u8TrapNo == X86_XCPT_PF)
9665 fFlags |= IEM_XCPT_FLAGS_CR2;
9666 switch (u8TrapNo)
9667 {
9668 case X86_XCPT_DF:
9669 case X86_XCPT_TS:
9670 case X86_XCPT_NP:
9671 case X86_XCPT_SS:
9672 case X86_XCPT_PF:
9673 case X86_XCPT_AC:
9674 case X86_XCPT_GP:
9675 fFlags |= IEM_XCPT_FLAGS_ERR;
9676 break;
9677 }
9678 break;
9679
9680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9681 }
9682
9683 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9684
9685 if (pVCpu->iem.s.cActiveMappings > 0)
9686 iemMemRollback(pVCpu);
9687
9688 return rcStrict;
9689}
9690
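/*
 * Illustrative sketch of injecting an external hardware interrupt through
 * IEMInjectTrap.  For hardware interrupts the error code and CR2 arguments are
 * ignored (see the switch above) and cbInstr only matters for software
 * interrupts.  The helper name and the vector 0x20 are made up for the example.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emR3ExampleInjectExtInt(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif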
9691
9692/**
9693 * Injects the active TRPM event.
9694 *
9695 * @returns Strict VBox status code.
9696 * @param pVCpu The cross context virtual CPU structure.
9697 */
9698VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9699{
9700#ifndef IEM_IMPLEMENTS_TASKSWITCH
9701 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9702#else
9703 uint8_t u8TrapNo;
9704 TRPMEVENT enmType;
9705 uint32_t uErrCode;
9706 RTGCUINTPTR uCr2;
9707 uint8_t cbInstr;
9708 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9709 if (RT_FAILURE(rc))
9710 return rc;
9711
9712 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9713 * ICEBP \#DB injection as a special case. */
9714 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9715#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9716 if (rcStrict == VINF_SVM_VMEXIT)
9717 rcStrict = VINF_SUCCESS;
9718#endif
9719#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9720 if (rcStrict == VINF_VMX_VMEXIT)
9721 rcStrict = VINF_SUCCESS;
9722#endif
9723 /** @todo Are there any other codes that imply the event was successfully
9724 * delivered to the guest? See @bugref{6607}. */
9725 if ( rcStrict == VINF_SUCCESS
9726 || rcStrict == VINF_IEM_RAISED_XCPT)
9727 TRPMResetTrap(pVCpu);
9728
9729 return rcStrict;
9730#endif
9731}
9732
9733
9734VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9735{
9736 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9737 return VERR_NOT_IMPLEMENTED;
9738}
9739
9740
9741VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9742{
9743 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9744 return VERR_NOT_IMPLEMENTED;
9745}
9746
9747
9748/**
9749 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9750 *
9751 * This API ASSUMES that the caller has already verified that the guest code is
9752 * allowed to access the I/O port. (The I/O port is in the DX register in the
9753 * guest state.)
9754 *
9755 * @returns Strict VBox status code.
9756 * @param pVCpu The cross context virtual CPU structure.
9757 * @param cbValue The size of the I/O port access (1, 2, or 4).
9758 * @param enmAddrMode The addressing mode.
9759 * @param fRepPrefix Indicates whether a repeat prefix is used
9760 * (doesn't matter which for this instruction).
9761 * @param cbInstr The instruction length in bytes.
9762 * @param iEffSeg The effective segment register (index).
9763 * @param fIoChecked Whether the access to the I/O port has been
9764 * checked or not. It's typically checked in the
9765 * HM scenario.
9766 */
9767VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9768 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9769{
9770 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9771 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9772
9773 /*
9774 * State init.
9775 */
9776 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9777
9778 /*
9779 * Switch orgy for getting to the right handler.
9780 */
9781 VBOXSTRICTRC rcStrict;
9782 if (fRepPrefix)
9783 {
9784 switch (enmAddrMode)
9785 {
9786 case IEMMODE_16BIT:
9787 switch (cbValue)
9788 {
9789 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9790 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9791 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9792 default:
9793 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9794 }
9795 break;
9796
9797 case IEMMODE_32BIT:
9798 switch (cbValue)
9799 {
9800 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9801 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9802 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9803 default:
9804 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9805 }
9806 break;
9807
9808 case IEMMODE_64BIT:
9809 switch (cbValue)
9810 {
9811 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9812 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9813 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9814 default:
9815 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9816 }
9817 break;
9818
9819 default:
9820 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9821 }
9822 }
9823 else
9824 {
9825 switch (enmAddrMode)
9826 {
9827 case IEMMODE_16BIT:
9828 switch (cbValue)
9829 {
9830 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9831 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9832 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9833 default:
9834 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9835 }
9836 break;
9837
9838 case IEMMODE_32BIT:
9839 switch (cbValue)
9840 {
9841 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9842 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9843 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9844 default:
9845 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9846 }
9847 break;
9848
9849 case IEMMODE_64BIT:
9850 switch (cbValue)
9851 {
9852 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9853 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9854 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9855 default:
9856 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9857 }
9858 break;
9859
9860 default:
9861 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9862 }
9863 }
9864
9865 if (pVCpu->iem.s.cActiveMappings)
9866 iemMemRollback(pVCpu);
9867
9868 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9869}
9870
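/*
 * Illustrative sketch of how an HM exit handler for a string OUT might forward
 * the decoded exit information to IEMExecStringIoWrite.  The concrete values
 * (32-bit address size, byte-sized port access, DS as effective segment, a
 * 2 byte instruction) describe a plain "rep outsb"; the helper name is
 * hypothetical.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC hmR0ExampleStringOut(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif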
9871
9872/**
9873 * Interface for HM and EM for executing string I/O IN (read) instructions.
9874 *
9875 * This API ASSUMES that the caller has already verified that the guest code is
9876 * allowed to access the I/O port. (The I/O port is in the DX register in the
9877 * guest state.)
9878 *
9879 * @returns Strict VBox status code.
9880 * @param pVCpu The cross context virtual CPU structure.
9881 * @param cbValue The size of the I/O port access (1, 2, or 4).
9882 * @param enmAddrMode The addressing mode.
9883 * @param fRepPrefix Indicates whether a repeat prefix is used
9884 * (doesn't matter which for this instruction).
9885 * @param cbInstr The instruction length in bytes.
9886 * @param fIoChecked Whether the access to the I/O port has been
9887 * checked or not. It's typically checked in the
9888 * HM scenario.
9889 */
9890VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9891 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
9892{
9893 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9894
9895 /*
9896 * State init.
9897 */
9898 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9899
9900 /*
9901 * Switch orgy for getting to the right handler.
9902 */
9903 VBOXSTRICTRC rcStrict;
9904 if (fRepPrefix)
9905 {
9906 switch (enmAddrMode)
9907 {
9908 case IEMMODE_16BIT:
9909 switch (cbValue)
9910 {
9911 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
9912 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
9913 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
9914 default:
9915 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9916 }
9917 break;
9918
9919 case IEMMODE_32BIT:
9920 switch (cbValue)
9921 {
9922 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
9923 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
9924 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
9925 default:
9926 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9927 }
9928 break;
9929
9930 case IEMMODE_64BIT:
9931 switch (cbValue)
9932 {
9933 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
9934 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
9935 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
9936 default:
9937 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9938 }
9939 break;
9940
9941 default:
9942 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9943 }
9944 }
9945 else
9946 {
9947 switch (enmAddrMode)
9948 {
9949 case IEMMODE_16BIT:
9950 switch (cbValue)
9951 {
9952 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
9953 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
9954 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
9955 default:
9956 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9957 }
9958 break;
9959
9960 case IEMMODE_32BIT:
9961 switch (cbValue)
9962 {
9963 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
9964 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
9965 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
9966 default:
9967 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9968 }
9969 break;
9970
9971 case IEMMODE_64BIT:
9972 switch (cbValue)
9973 {
9974 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
9975 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
9976 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
9977 default:
9978 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9979 }
9980 break;
9981
9982 default:
9983 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9984 }
9985 }
9986
9987 if ( pVCpu->iem.s.cActiveMappings == 0
9988 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
9989 { /* likely */ }
9990 else
9991 {
9992 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
9993 iemMemRollback(pVCpu);
9994 }
9995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9996}
9997
9998
9999/**
10000 * Interface for rawmode to execute an OUT instruction.
10001 *
10002 * @returns Strict VBox status code.
10003 * @param pVCpu The cross context virtual CPU structure.
10004 * @param cbInstr The instruction length in bytes.
10005 * @param u16Port The port to write to.
10006 * @param fImm Whether the port is specified using an immediate operand or
10007 * using the implicit DX register.
10008 * @param cbReg The register size.
10009 *
10010 * @remarks In ring-0 not all of the state needs to be synced in.
10011 */
10012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10013{
10014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10015 Assert(cbReg <= 4 && cbReg != 3);
10016
10017 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10018 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10019 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10020 Assert(!pVCpu->iem.s.cActiveMappings);
10021 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10022}
10023
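/*
 * Illustrative sketch of emulating "out dx, al" via IEMExecDecodedOut: fImm is
 * false because the port comes from DX rather than an immediate operand, cbReg
 * is 1 for AL, and the instruction is a single byte (0xEE).  IEMExecDecodedIn
 * is used the same way for IN.  The helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC hmR0ExampleOutDxAl(PVMCPUCC pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif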
10024
10025/**
10026 * Interface for rawmode to execute an IN instruction.
10027 *
10028 * @returns Strict VBox status code.
10029 * @param pVCpu The cross context virtual CPU structure.
10030 * @param cbInstr The instruction length in bytes.
10031 * @param u16Port The port to read.
10032 * @param fImm Whether the port is specified using an immediate operand or
10033 * using the implicit DX.
10034 * @param cbReg The register size.
10035 */
10036VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10037{
10038 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10039 Assert(cbReg <= 4 && cbReg != 3);
10040
10041 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10042 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10043 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10044 Assert(!pVCpu->iem.s.cActiveMappings);
10045 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10046}
10047
10048
10049/**
10050 * Interface for HM and EM to write to a CRx register.
10051 *
10052 * @returns Strict VBox status code.
10053 * @param pVCpu The cross context virtual CPU structure.
10054 * @param cbInstr The instruction length in bytes.
10055 * @param iCrReg The control register number (destination).
10056 * @param iGReg The general purpose register number (source).
10057 *
10058 * @remarks In ring-0 not all of the state needs to be synced in.
10059 */
10060VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10061{
10062 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10063 Assert(iCrReg < 16);
10064 Assert(iGReg < 16);
10065
10066 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10067 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10068 Assert(!pVCpu->iem.s.cActiveMappings);
10069 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10070}
10071
10072
10073/**
10074 * Interface for HM and EM to read from a CRx register.
10075 *
10076 * @returns Strict VBox status code.
10077 * @param pVCpu The cross context virtual CPU structure.
10078 * @param cbInstr The instruction length in bytes.
10079 * @param iGReg The general purpose register number (destination).
10080 * @param iCrReg The control register number (source).
10081 *
10082 * @remarks In ring-0 not all of the state needs to be synced in.
10083 */
10084VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10085{
10086 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10088 | CPUMCTX_EXTRN_APIC_TPR);
10089 Assert(iCrReg < 16);
10090 Assert(iGReg < 16);
10091
10092 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10093 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10094 Assert(!pVCpu->iem.s.cActiveMappings);
10095 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10096}
10097
10098
10099/**
10100 * Interface for HM and EM to write to a DRx register.
10101 *
10102 * @returns Strict VBox status code.
10103 * @param pVCpu The cross context virtual CPU structure.
10104 * @param cbInstr The instruction length in bytes.
10105 * @param iDrReg The debug register number (destination).
10106 * @param iGReg The general purpose register number (source).
10107 *
10108 * @remarks In ring-0 not all of the state needs to be synced in.
10109 */
10110VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10111{
10112 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10114 Assert(iDrReg < 8);
10115 Assert(iGReg < 16);
10116
10117 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10118 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10119 Assert(!pVCpu->iem.s.cActiveMappings);
10120 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10121}
10122
10123
10124/**
10125 * Interface for HM and EM to read from a DRx register.
10126 *
10127 * @returns Strict VBox status code.
10128 * @param pVCpu The cross context virtual CPU structure.
10129 * @param cbInstr The instruction length in bytes.
10130 * @param iGReg The general purpose register number (destination).
10131 * @param iDrReg The debug register number (source).
10132 *
10133 * @remarks In ring-0 not all of the state needs to be synced in.
10134 */
10135VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10136{
10137 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10138 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10139 Assert(iDrReg < 8);
10140 Assert(iGReg < 16);
10141
10142 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10143 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10144 Assert(!pVCpu->iem.s.cActiveMappings);
10145 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10146}
10147
10148
10149/**
10150 * Interface for HM and EM to clear the CR0[TS] bit.
10151 *
10152 * @returns Strict VBox status code.
10153 * @param pVCpu The cross context virtual CPU structure.
10154 * @param cbInstr The instruction length in bytes.
10155 *
10156 * @remarks In ring-0 not all of the state needs to be synced in.
10157 */
10158VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10159{
10160 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10161
10162 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10163 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10164 Assert(!pVCpu->iem.s.cActiveMappings);
10165 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10166}
10167
10168
10169/**
10170 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10171 *
10172 * @returns Strict VBox status code.
10173 * @param pVCpu The cross context virtual CPU structure.
10174 * @param cbInstr The instruction length in bytes.
10175 * @param uValue The value to load into CR0.
10176 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10177 * memory operand. Otherwise pass NIL_RTGCPTR.
10178 *
10179 * @remarks In ring-0 not all of the state needs to be synced in.
10180 */
10181VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10182{
10183 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10184
10185 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10186 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10187 Assert(!pVCpu->iem.s.cActiveMappings);
10188 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10189}
10190
10191
10192/**
10193 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10194 *
10195 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10196 *
10197 * @returns Strict VBox status code.
10198 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10199 * @param cbInstr The instruction length in bytes.
10200 * @remarks In ring-0 not all of the state needs to be synced in.
10201 * @thread EMT(pVCpu)
10202 */
10203VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10204{
10205 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10206
10207 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10208 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10209 Assert(!pVCpu->iem.s.cActiveMappings);
10210 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10211}
10212
10213
10214/**
10215 * Interface for HM and EM to emulate the WBINVD instruction.
10216 *
10217 * @returns Strict VBox status code.
10218 * @param pVCpu The cross context virtual CPU structure.
10219 * @param cbInstr The instruction length in bytes.
10220 *
10221 * @remarks In ring-0 not all of the state needs to be synced in.
10222 */
10223VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10224{
10225 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10226
10227 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10228 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10229 Assert(!pVCpu->iem.s.cActiveMappings);
10230 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10231}
10232
10233
10234/**
10235 * Interface for HM and EM to emulate the INVD instruction.
10236 *
10237 * @returns Strict VBox status code.
10238 * @param pVCpu The cross context virtual CPU structure.
10239 * @param cbInstr The instruction length in bytes.
10240 *
10241 * @remarks In ring-0 not all of the state needs to be synced in.
10242 */
10243VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10244{
10245 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10246
10247 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10248 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10249 Assert(!pVCpu->iem.s.cActiveMappings);
10250 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10251}
10252
10253
10254/**
10255 * Interface for HM and EM to emulate the INVLPG instruction.
10256 *
10257 * @returns Strict VBox status code.
10258 * @retval VINF_PGM_SYNC_CR3
10259 *
10260 * @param pVCpu The cross context virtual CPU structure.
10261 * @param cbInstr The instruction length in bytes.
10262 * @param GCPtrPage The effective address of the page to invalidate.
10263 *
10264 * @remarks In ring-0 not all of the state needs to be synced in.
10265 */
10266VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10267{
10268 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10269
10270 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10271 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10272 Assert(!pVCpu->iem.s.cActiveMappings);
10273 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10274}
10275
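/*
 * Illustrative sketch of invoking the INVLPG emulation and simply passing the
 * VINF_PGM_SYNC_CR3 informational status mentioned in the @retval above on to
 * the caller.  The helper name is hypothetical; the instruction length of 3
 * matches "invlpg [mem]" with a ModR/M byte and no prefixes.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC hmR0ExampleInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
    /* VINF_PGM_SYNC_CR3 tells the caller a shadow page table resync is needed. */
    return rcStrict;
}
#endif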
10276
10277/**
10278 * Interface for HM and EM to emulate the INVPCID instruction.
10279 *
10280 * @returns Strict VBox status code.
10281 * @retval VINF_PGM_SYNC_CR3
10282 *
10283 * @param pVCpu The cross context virtual CPU structure.
10284 * @param cbInstr The instruction length in bytes.
10285 * @param iEffSeg The effective segment register.
10286 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10287 * @param uType The invalidation type.
10288 *
10289 * @remarks In ring-0 not all of the state needs to be synced in.
10290 */
10291VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10292 uint64_t uType)
10293{
10294 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10295
10296 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10297 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10298 Assert(!pVCpu->iem.s.cActiveMappings);
10299 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10300}
10301
10302
10303/**
10304 * Interface for HM and EM to emulate the CPUID instruction.
10305 *
10306 * @returns Strict VBox status code.
10307 *
10308 * @param pVCpu The cross context virtual CPU structure.
10309 * @param cbInstr The instruction length in bytes.
10310 *
10311 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10312 */
10313VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10314{
10315 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10316 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10317
10318 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10319 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10320 Assert(!pVCpu->iem.s.cActiveMappings);
10321 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10322}
10323
10324
10325/**
10326 * Interface for HM and EM to emulate the RDPMC instruction.
10327 *
10328 * @returns Strict VBox status code.
10329 *
10330 * @param pVCpu The cross context virtual CPU structure.
10331 * @param cbInstr The instruction length in bytes.
10332 *
10333 * @remarks Not all of the state needs to be synced in.
10334 */
10335VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10336{
10337 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10338 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10339
10340 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10341 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10342 Assert(!pVCpu->iem.s.cActiveMappings);
10343 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10344}
10345
10346
10347/**
10348 * Interface for HM and EM to emulate the RDTSC instruction.
10349 *
10350 * @returns Strict VBox status code.
10351 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10352 *
10353 * @param pVCpu The cross context virtual CPU structure.
10354 * @param cbInstr The instruction length in bytes.
10355 *
10356 * @remarks Not all of the state needs to be synced in.
10357 */
10358VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10359{
10360 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10361 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10362
10363 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10365 Assert(!pVCpu->iem.s.cActiveMappings);
10366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10367}
10368
10369
10370/**
10371 * Interface for HM and EM to emulate the RDTSCP instruction.
10372 *
10373 * @returns Strict VBox status code.
10374 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10375 *
10376 * @param pVCpu The cross context virtual CPU structure.
10377 * @param cbInstr The instruction length in bytes.
10378 *
10379 * @remarks Not all of the state needs to be synced in. Recommended
10380 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call.
10381 */
10382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10383{
10384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10385 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10386
10387 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10388 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10389 Assert(!pVCpu->iem.s.cActiveMappings);
10390 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10391}
10392
10393
10394/**
10395 * Interface for HM and EM to emulate the RDMSR instruction.
10396 *
10397 * @returns Strict VBox status code.
10398 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10399 *
10400 * @param pVCpu The cross context virtual CPU structure.
10401 * @param cbInstr The instruction length in bytes.
10402 *
10403 * @remarks Not all of the state needs to be synced in. Requires RCX and
10404 * (currently) all MSRs.
10405 */
10406VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10407{
10408 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10409 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10410
10411 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10412 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10413 Assert(!pVCpu->iem.s.cActiveMappings);
10414 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10415}


/**
 * Interface for HM and EM to emulate the WRMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  Requires RCX, RAX, RDX,
 *          and (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                        | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the MONITOR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 * @remarks ASSUMES the default segment of DS and no segment override prefixes
 *          are used.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the MWAIT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the HLT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Checks if IEM is in the process of delivering an event (interrupt or
 * exception).
 *
 * @returns true if we're in the process of raising an interrupt or exception,
 *          false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   puVector    Where to store the vector associated with the
 *                      currently delivered event, optional.
 * @param   pfFlags     Where to store the event delivery flags (see
 *                      IEM_XCPT_FLAGS_XXX), optional.
 * @param   puErr       Where to store the error code associated with the
 *                      event, optional.
 * @param   puCr2       Where to store the CR2 associated with the event,
 *                      optional.
 * @remarks The caller should check the flags to determine if the error code and
 *          CR2 are valid for the event.
 */
VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
{
    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
    if (fRaisingXcpt)
    {
        if (puVector)
            *puVector = pVCpu->iem.s.uCurXcpt;
        if (pfFlags)
            *pfFlags = pVCpu->iem.s.fCurXcpt;
        if (puErr)
            *puErr = pVCpu->iem.s.uCurXcptErr;
        if (puCr2)
            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
    }
    return fRaisingXcpt;
}
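
/*
 * Illustrative sketch only (not part of the VMM sources): a hypothetical
 * caller of IEMGetCurrentXcpt following the remarks above, i.e. checking the
 * IEM_XCPT_FLAGS_XXX bits before trusting the error code and CR2 values.
 *
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         Log(("Delivering event %#x (flags %#x)\n", uVector, fFlags));
 *         if (fFlags & IEM_XCPT_FLAGS_ERR)
 *             Log(("  error code: %#x\n", uErr));
 *         if (fFlags & IEM_XCPT_FLAGS_CR2)
 *             Log(("  CR2:        %#RX64\n", uCr2));
 *     }
 */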

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
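
/*
 * Illustrative sketch only (not part of the VMM sources): how ring-3
 * force-flag processing might dispatch to IEMR3ProcessForceFlag when
 * VMCPU_FF_IEM is set.  The surrounding handler is hypothetical; the actual
 * dispatching lives in EM's force-flag processing.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 *
 * The returned status is the merge of the incoming status and the outcome of
 * the pending bounce-buffer commits.
 */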

#endif /* IN_RING3 */
