VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@103861

Last change on this file since 103861 was 103840, checked in by vboxsync, 13 months ago

VMM/IEM: Implement native emitters for IEM_MC_RAISE_DIVIDE_ERROR() and IEM_MC_IF_LOCAL_IS_Z(), bugref:10371 [comment]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 449.9 KB
Line 
1/* $Id: IEMAll.cpp 103840 2024-03-14 09:06:05Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
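/*
 * For illustration only: hypothetical call sites following the level
 * conventions above (not statements taken from this file; the Log, LogFlow
 * and Log4 macros come from VBox/log.h):
 */
#if 0
    Log(("iemRaiseXcptOrInt: u8Vector=%#x fFlags=%#x\n", u8Vector, fFlags));    /* level 1: exceptions and other major events */
    LogFlow(("IEMExecOne: returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));        /* flow: basic enter/exit state info */
    Log4(("decode - %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); /* level 4: decoding w/ EIP */
#endif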
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetches opcodes when starting execution for the first time.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
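/*
 * A minimal sketch of the revision-tag trick used above, with hypothetical
 * simplified types (not the real IEMTLB/IEMTLBENTRY layout): a lookup only
 * hits when the stored tag carries the current revision, so bumping the
 * revision invalidates every entry in O(1), and only on the rare rollover to
 * zero do the tags need to be cleared explicitly.
 */
#if 0
typedef struct TOYTLBENTRY { uint64_t uTag; } TOYTLBENTRY;
typedef struct TOYTLB { uint64_t uRevision; TOYTLBENTRY aEntries[256]; } TOYTLB;
# define TOY_REVISION_INCR  UINT64_C(0x100000000) /* revision lives above the (toy) page-tag bits */

static void toyTlbInvalidateAll(TOYTLB *pTlb)
{
    pTlb->uRevision += TOY_REVISION_INCR;
    if (pTlb->uRevision == 0) /* rollover: stale tags would otherwise alias the new revision */
    {
        pTlb->uRevision = TOY_REVISION_INCR;
        for (unsigned i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
            pTlb->aEntries[i].uTag = 0;
    }
}

static bool toyTlbIsHit(TOYTLB const *pTlb, unsigned idx, uint64_t uTagNoRev)
{
    return pTlb->aEntries[idx].uTag == (uTagNoRev | pTlb->uRevision);
}
#endif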
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
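/*
 * Sketch of why the publishing above uses compare-exchange rather than a
 * plain store (an assumption about the intent, not taken from this file):
 * the owning EMT may bump or reset its own revision concurrently, and the
 * cmpxchg only installs the new value while the sampled one is still there,
 * so a newer value is never overwritten.
 */
#if 0
static void toyPublishPhysRevBump(IEMTLB *pTlb)
{
    uint64_t const uOld = ASMAtomicUoReadU64(&pTlb->uTlbPhysRev);
    ASMAtomicCmpXchgU64(&pTlb->uTlbPhysRev, uOld + IEMTLB_PHYS_REV_INCR, uOld); /* no-op if the owner raced us */
}
#endif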
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
831 * failure and jumps.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. crossing a page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit are actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * If there is no special read handling, we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
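/*
 * Sketch of the pbInstrBuf (re)initialization pattern mentioned in the doc
 * comment above for the cbDst == 0 case (hypothetical call site; assumes the
 * caller is already running under the IEM longjmp protection):
 */
# if 0
    pVCpu->iem.s.pbInstrBuf       = NULL;  /* force a fresh TLB lookup / mapping */
    pVCpu->iem.s.offInstrNextByte = 0;
    pVCpu->iem.s.offCurInstrStart = 0;
    iemOpcodeFetchBytesJmp(pVCpu, 0 /*cbDst*/, NULL /*pvDst*/); /* may longjmp on #PF/#GP */
# endif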
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode byte, sign extended to a word.
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
1377 return rcStrict;
1378}
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
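/*
 * Worked example of the sign extension performed by the S8Sx helpers above
 * (illustrative only, relying on the (int8_t) cast before the implicit
 * widening conversion):
 */
#if 0
AssertCompile((uint16_t)(int8_t)UINT8_C(0xfe) == UINT16_MAX - 1); /* 0xfe -> -2 -> 0xfffe */
AssertCompile((uint32_t)(int8_t)UINT8_C(0xfe) == UINT32_MAX - 1); /* 0xfe -> -2 -> 0xfffffffe */
AssertCompile((uint64_t)(int8_t)UINT8_C(0xfe) == UINT64_MAX - 1); /* 0xfe -> -2 -> 0xfffffffffffffffe */
#endif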
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
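/*
 * Byte order note, as a stand-alone sketch (assuming the first RT_MAKE_U16
 * argument is the least significant byte, which is what the little-endian
 * x86 instruction encoding requires of the composition above):
 */
#if 0
    uint8_t  const abOp[2] = { 0x34, 0x12 };                                  /* bytes as they appear in the instruction stream */
    uint16_t const u16     = RT_MAKE_U16(abOp[0], abOp[1]);                   /* == 0x1234 */
    uint16_t const u16Alt  = (uint16_t)(abOp[0] | ((uint16_t)abOp[1] << 8));  /* same value, spelled out */
#endif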
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode double word, zero extended to a quad word.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode qword.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
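/*
 * Illustration (not part of the build): the difference between the ZxU64 and
 * SxU64 variants above is only in how the fetched double word is widened.
 * Roughly:
 *
 *     uint64_t exampleZeroExtend(uint32_t u32) { return u32; }
 *     uint64_t exampleSignExtend(uint32_t u32) { return (uint64_t)(int64_t)(int32_t)u32; }
 *
 * so the opcode bytes FF FF FF FF become 0x00000000ffffffff when zero
 * extended and 0xffffffffffffffff when sign extended (the latter being what
 * sign-extended displacements and immediates need).
 */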
1640
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
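/*
 * Quick reference (paraphrasing the Intel SDM double fault rules that the
 * classification above feeds into):
 *
 *      first \ second   benign      contributory    page fault
 *      benign           serial      serial          serial
 *      contributory     serial      #DF             serial
 *      page fault       serial      #DF             #DF
 *
 * "serial" means the second exception is simply delivered.  The NMI, #AC and
 * triple fault special cases are handled explicitly in
 * IEMEvaluateRecursiveXcpt below.
 */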
1748
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768     * Only CPU exceptions can be raised while delivering other events; software interrupt
1769     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
1843
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
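/*
 * Summary of the checks above (a convenience recap, not normative): a null SS
 * yields #TS(0); an RPL/CPL mismatch, a system or non-writable/code
 * descriptor, or a DPL != CPL yields a #TS with the selector as error code;
 * a descriptor that isn't present yields #NP with the selector.  Whether the
 * presence check precedes the caller's canonical/limit checks is still an
 * open testcase (see the todo above).
 */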
1934
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
1967 uint32_t off = uCpl * 4 + 2;
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
1994 uint32_t off = uCpl * 8 + 4;
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997            /** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009            Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
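/*
 * Worked example for the offsets used above (assuming the standard X86TSS16
 * and X86TSS32 layouts): in a 16-bit TSS the ring-N sp:ss pair starts at
 * 2 + uCpl * 4, so uCpl=1 reads the u32 at offset 6 and splits it into sp1
 * (low word) and ss1 (high word).  In a 32-bit TSS the esp/ss pair starts at
 * 4 + uCpl * 8, so uCpl=1 reads the u64 at offset 0x0C covering esp1 and ss1
 * (the selector sits in bits 32..47 of the fetched qword).
 */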
2025
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, zero to use uCpl instead.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
2045 uint32_t off;
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
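/*
 * Worked example (assuming the canonical 64-bit TSS layout with rsp0 at
 * offset 4 and ist1 at offset 0x24): uIst=0, uCpl=2 reads the 8 bytes at
 * offset 4 + 2*8 = 0x14 (rsp2), while uIst=3 ignores uCpl and reads the
 * 8 bytes at offset 0x24 + 2*8 = 0x34 (ist3).
 */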
2058
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118#ifdef LOG_ENABLED
2119    /* If software interrupt, try to decode it if logging is enabled and such. */
2120 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2121 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2122 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2123#endif
2124
2125 /*
2126 * Push the stack frame.
2127 */
2128 uint8_t bUnmapInfo;
2129 uint16_t *pu16Frame;
2130 uint64_t uNewRsp;
2131 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2132 if (rcStrict != VINF_SUCCESS)
2133 return rcStrict;
2134
2135 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2136#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2137 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2138 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2139 fEfl |= UINT16_C(0xf000);
2140#endif
2141 pu16Frame[2] = (uint16_t)fEfl;
2142 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2143 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2144 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2145 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2146 return rcStrict;
2147
2148 /*
2149 * Load the vector address into cs:ip and make exception specific state
2150 * adjustments.
2151 */
2152 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2153 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2155 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2156 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2157 pVCpu->cpum.GstCtx.rip = Idte.off;
2158 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2159 IEMMISC_SET_EFL(pVCpu, fEfl);
2160
2161 /** @todo do we actually do this in real mode? */
2162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2164
2165    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2166 so best leave them alone in case we're in a weird kind of real mode... */
2167
2168 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2169}
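/*
 * Illustration (not part of the build) of the real-mode dispatch done above:
 * each IVT entry is a 4-byte offset:selector pair at IDT base + vector * 4,
 * e.g. vector 0x21 lives at offset 0x84.  The three words pushed form the
 * IRET frame, lowest address first:
 *
 *      new SP + 0:  return IP (+ cbInstr for software interrupts)
 *      new SP + 2:  return CS
 *      new SP + 4:  FLAGS (IF/TF/AC are cleared only in the live EFLAGS
 *                   afterwards, not in the pushed image)
 */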
2170
2171
2172/**
2173 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 */
2178DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2179{
2180 pSReg->Sel = 0;
2181 pSReg->ValidSel = 0;
2182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2183 {
2184        /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
2185 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2186 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2187 }
2188 else
2189 {
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 /** @todo check this on AMD-V */
2192 pSReg->u64Base = 0;
2193 pSReg->u32Limit = 0;
2194 }
2195}
2196
2197
2198/**
2199 * Loads a segment selector during a task switch in V8086 mode.
2200 *
2201 * @param pSReg Pointer to the segment register.
2202 * @param uSel The selector value to load.
2203 */
2204DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2205{
2206 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2207 pSReg->Sel = uSel;
2208 pSReg->ValidSel = uSel;
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 pSReg->u64Base = uSel << 4;
2211 pSReg->u32Limit = 0xffff;
2212 pSReg->Attr.u = 0xf3;
2213}
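/*
 * Worked example for the V86 load above: uSel=0x1234 gives u64Base=0x12340,
 * u32Limit=0xffff and Attr.u=0xf3, i.e. present, DPL=3, non-system,
 * read/write accessed data - the fixed shape V8086 segments always have.
 */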
2214
2215
2216/**
2217 * Loads a segment selector during a task switch in protected mode.
2218 *
2219 * In this task switch scenario, we would throw \#TS exceptions rather than
2220 * \#GPs.
2221 *
2222 * @returns VBox strict status code.
2223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2224 * @param pSReg Pointer to the segment register.
2225 * @param uSel The new selector value.
2226 *
2227 * @remarks This does _not_ handle CS or SS.
2228 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2229 */
2230static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2233
2234 /* Null data selector. */
2235 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2236 {
2237 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2239 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2240 return VINF_SUCCESS;
2241 }
2242
2243 /* Fetch the descriptor. */
2244 IEMSELDESC Desc;
2245 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2249 VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 /* Must be a data segment or readable code segment. */
2254 if ( !Desc.Legacy.Gen.u1DescType
2255 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2256 {
2257 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2258 Desc.Legacy.Gen.u4Type));
2259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* Check privileges for data segments and non-conforming code segments. */
2263 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2264 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2265 {
2266 /* The RPL and the new CPL must be less than or equal to the DPL. */
2267 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2268 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2269 {
2270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2271 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2273 }
2274 }
2275
2276 /* Is it there? */
2277 if (!Desc.Legacy.Gen.u1Present)
2278 {
2279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282
2283 /* The base and limit. */
2284 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2285 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2286
2287 /*
2288 * Ok, everything checked out fine. Now set the accessed bit before
2289 * committing the result into the registers.
2290 */
2291 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2292 {
2293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2297 }
2298
2299 /* Commit */
2300 pSReg->Sel = uSel;
2301 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2302 pSReg->u32Limit = cbLimit;
2303 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2304 pSReg->ValidSel = uSel;
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2307 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2308
2309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2311 return VINF_SUCCESS;
2312}
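/*
 * Note on the base/limit extraction above (an illustration, not new policy):
 * X86DESC_LIMIT_G applies the granularity bit, so a descriptor with
 * limit=0xfffff and G=1 yields cbLimit=0xffffffff while G=0 yields 0xfffff;
 * X86DESC_BASE stitches the three base fields back into a flat 32-bit base,
 * zero extended into u64Base here.
 */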
2313
2314
2315/**
2316 * Performs a task switch.
2317 *
2318 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2319 * caller is responsible for performing the necessary checks (like DPL, TSS
2320 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2321 * reference for JMP, CALL, IRET.
2322 *
2323 * If the task switch is due to a software interrupt or hardware exception,
2324 * the caller is responsible for validating the TSS selector and descriptor. See
2325 * Intel Instruction reference for INT n.
2326 *
2327 * @returns VBox strict status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @param enmTaskSwitch The cause of the task switch.
2330 * @param uNextEip The EIP effective after the task switch.
2331 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2332 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2333 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2334 * @param SelTss The TSS selector of the new task.
2335 * @param pNewDescTss Pointer to the new TSS descriptor.
2336 */
2337VBOXSTRICTRC
2338iemTaskSwitch(PVMCPUCC pVCpu,
2339 IEMTASKSWITCH enmTaskSwitch,
2340 uint32_t uNextEip,
2341 uint32_t fFlags,
2342 uint16_t uErr,
2343 uint64_t uCr2,
2344 RTSEL SelTss,
2345 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2346{
2347 Assert(!IEM_IS_REAL_MODE(pVCpu));
2348 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350
2351 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2352 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2353 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2354 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2356
2357 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2358 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2359
2360 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2361 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2362
2363 /* Update CR2 in case it's a page-fault. */
2364 /** @todo This should probably be done much earlier in IEM/PGM. See
2365 * @bugref{5653#c49}. */
2366 if (fFlags & IEM_XCPT_FLAGS_CR2)
2367 pVCpu->cpum.GstCtx.cr2 = uCr2;
2368
2369 /*
2370 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2371 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2372 */
2373 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2374 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2375 if (uNewTssLimit < uNewTssLimitMin)
2376 {
2377 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2378 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2380 }
2381
2382 /*
2383     * Task switches in VMX non-root mode always cause task-switch VM-exits.
2384 * The new TSS must have been read and validated (DPL, limits etc.) before a
2385 * task-switch VM-exit commences.
2386 *
2387 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2388 */
2389 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2390 {
2391 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2392 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2393 }
2394
2395 /*
2396 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2397     * after validating the incoming (new) TSS; see AMD spec. 15.14.1 "Task Switch Intercept".
2398 */
2399 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2400 {
2401 uint64_t const uExitInfo1 = SelTss;
2402 uint64_t uExitInfo2 = uErr;
2403 switch (enmTaskSwitch)
2404 {
2405 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2406 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2407 default: break;
2408 }
2409 if (fFlags & IEM_XCPT_FLAGS_ERR)
2410 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2411 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2413
2414 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2415 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2416 RT_NOREF2(uExitInfo1, uExitInfo2);
2417 }
2418
2419 /*
2420     * Check the current TSS limit. The last bytes written to the current TSS during the
2421     * task switch are the 2 bytes at offset 0x5C (32-bit, the GS field) and at offset 0x28 (16-bit, the DS field).
2422 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2423 *
2424     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2425 * end up with smaller than "legal" TSS limits.
2426 */
2427 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2428 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2429 if (uCurTssLimit < uCurTssLimitMin)
2430 {
2431 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2432 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2434 }
2435
2436 /*
2437 * Verify that the new TSS can be accessed and map it. Map only the required contents
2438 * and not the entire TSS.
2439 */
2440 uint8_t bUnmapInfoNewTss;
2441 void *pvNewTss;
2442 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2443 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2444 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2445 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2446 * not perform correct translation if this happens. See Intel spec. 7.2.1
2447 * "Task-State Segment". */
2448 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2449/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2450 * Consider wrapping the remainder into a function for simpler cleanup. */
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2454 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2455 return rcStrict;
2456 }
2457
2458 /*
2459 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2460 */
2461 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2462 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2463 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2464 {
2465 uint8_t bUnmapInfoDescCurTss;
2466 PX86DESC pDescCurTss;
2467 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2468 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2472 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475
2476 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484
2485 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2486 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2487 {
2488 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2489 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2490 fEFlags &= ~X86_EFL_NT;
2491 }
2492 }
2493
2494 /*
2495 * Save the CPU state into the current TSS.
2496 */
2497 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2498 if (GCPtrNewTss == GCPtrCurTss)
2499 {
2500 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2501 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2502 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2503 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2504 pVCpu->cpum.GstCtx.ldtr.Sel));
2505 }
2506 if (fIsNewTss386)
2507 {
2508 /*
2509 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2510 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2511 */
2512 uint8_t bUnmapInfoCurTss32;
2513 void *pvCurTss32;
2514 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2515 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2516 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2517 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2518 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2522 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525
2526        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2527 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2528 pCurTss32->eip = uNextEip;
2529 pCurTss32->eflags = fEFlags;
2530 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2531 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2532 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2533 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2534 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2535 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2536 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2537 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2538 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2539 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2540 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2541 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2542 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2543 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2544
2545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2546 if (rcStrict != VINF_SUCCESS)
2547 {
2548 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2549 VBOXSTRICTRC_VAL(rcStrict)));
2550 return rcStrict;
2551 }
2552 }
2553 else
2554 {
2555 /*
2556 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2557 */
2558 uint8_t bUnmapInfoCurTss16;
2559 void *pvCurTss16;
2560 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2561 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2562 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2563 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2564 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2568 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571
2572        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2573 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2574 pCurTss16->ip = uNextEip;
2575 pCurTss16->flags = (uint16_t)fEFlags;
2576 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2577 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2578 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2579 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2580 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2581 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2582 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2583 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2584 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2585 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2586 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2587 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2588
2589 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2590 if (rcStrict != VINF_SUCCESS)
2591 {
2592 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2593 VBOXSTRICTRC_VAL(rcStrict)));
2594 return rcStrict;
2595 }
2596 }
2597
2598 /*
2599 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2600 */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604        /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2605 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2606 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2607 }
2608
2609 /*
2610 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2611 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2612 */
2613 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2614 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2615 bool fNewDebugTrap;
2616 if (fIsNewTss386)
2617 {
2618 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2619 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2620 uNewEip = pNewTss32->eip;
2621 uNewEflags = pNewTss32->eflags;
2622 uNewEax = pNewTss32->eax;
2623 uNewEcx = pNewTss32->ecx;
2624 uNewEdx = pNewTss32->edx;
2625 uNewEbx = pNewTss32->ebx;
2626 uNewEsp = pNewTss32->esp;
2627 uNewEbp = pNewTss32->ebp;
2628 uNewEsi = pNewTss32->esi;
2629 uNewEdi = pNewTss32->edi;
2630 uNewES = pNewTss32->es;
2631 uNewCS = pNewTss32->cs;
2632 uNewSS = pNewTss32->ss;
2633 uNewDS = pNewTss32->ds;
2634 uNewFS = pNewTss32->fs;
2635 uNewGS = pNewTss32->gs;
2636 uNewLdt = pNewTss32->selLdt;
2637 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2638 }
2639 else
2640 {
2641 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2642 uNewCr3 = 0;
2643 uNewEip = pNewTss16->ip;
2644 uNewEflags = pNewTss16->flags;
2645 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2646 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2647 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2648 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2649 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2650 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2651 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2652 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2653 uNewES = pNewTss16->es;
2654 uNewCS = pNewTss16->cs;
2655 uNewSS = pNewTss16->ss;
2656 uNewDS = pNewTss16->ds;
2657 uNewFS = 0;
2658 uNewGS = 0;
2659 uNewLdt = pNewTss16->selLdt;
2660 fNewDebugTrap = false;
2661 }
2662
2663 if (GCPtrNewTss == GCPtrCurTss)
2664 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2665 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2666
2667 /*
2668 * We're done accessing the new TSS.
2669 */
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /*
2678 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2679 */
2680 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2681 {
2682 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2683 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2684 if (rcStrict != VINF_SUCCESS)
2685 {
2686 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2687 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2688 return rcStrict;
2689 }
2690
2691 /* Check that the descriptor indicates the new TSS is available (not busy). */
2692 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2693 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2694 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2695
2696 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2697 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2701 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705
2706 /*
2707 * From this point on, we're technically in the new task. We will defer exceptions
2708 * until the completion of the task switch but before executing any instructions in the new task.
2709 */
2710 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2711 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2712 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2714 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2715 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2716 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2717
2718 /* Set the busy bit in TR. */
2719 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2720
2721 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2722 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2723 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2724 {
2725 uNewEflags |= X86_EFL_NT;
2726 }
2727
2728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2729 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2731
2732 pVCpu->cpum.GstCtx.eip = uNewEip;
2733 pVCpu->cpum.GstCtx.eax = uNewEax;
2734 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2735 pVCpu->cpum.GstCtx.edx = uNewEdx;
2736 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2737 pVCpu->cpum.GstCtx.esp = uNewEsp;
2738 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2739 pVCpu->cpum.GstCtx.esi = uNewEsi;
2740 pVCpu->cpum.GstCtx.edi = uNewEdi;
2741
2742 uNewEflags &= X86_EFL_LIVE_MASK;
2743 uNewEflags |= X86_EFL_RA1_MASK;
2744 IEMMISC_SET_EFL(pVCpu, uNewEflags);
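    /* Note: X86_EFL_LIVE_MASK strips the reserved EFLAGS bits from the TSS
       image and X86_EFL_RA1_MASK puts back bit 1, which always reads as one
       on x86, so junk in the saved image can't leak into the live EFLAGS. */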
2745
2746 /*
2747 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2748 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2749     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2750 */
2751 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2752 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2753
2754 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2755 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2756
2757 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2758 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2759
2760 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2761 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2762
2763 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2764 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2765
2766 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2767 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2769
2770 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2771 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2774
2775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2776 {
2777 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2778 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2779 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2784 }
2785
2786 /*
2787 * Switch CR3 for the new task.
2788 */
2789 if ( fIsNewTss386
2790 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2791 {
2792 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2793 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2794 AssertRCSuccessReturn(rc, rc);
2795
2796 /* Inform PGM. */
2797 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2798 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2799 AssertRCReturn(rc, rc);
2800 /* ignore informational status codes */
2801
2802 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2803 }
2804
2805 /*
2806 * Switch LDTR for the new task.
2807 */
2808 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2809 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2810 else
2811 {
2812 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2813
2814 IEMSELDESC DescNewLdt;
2815 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2819 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822 if ( !DescNewLdt.Legacy.Gen.u1Present
2823 || DescNewLdt.Legacy.Gen.u1DescType
2824 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2825 {
2826 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2827 uNewLdt, DescNewLdt.Legacy.u));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2832 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2833 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2834 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2835 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2836 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2839 }
2840
2841 IEMSELDESC DescSS;
2842 if (IEM_IS_V86_MODE(pVCpu))
2843 {
2844 IEM_SET_CPL(pVCpu, 3);
2845 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2846 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2851
2852 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2853 DescSS.Legacy.u = 0;
2854 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2855 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2856 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2857 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2858 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2859 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2860 DescSS.Legacy.Gen.u2Dpl = 3;
2861 }
2862 else
2863 {
2864 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2865
2866 /*
2867 * Load the stack segment for the new task.
2868 */
2869 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2870 {
2871 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2872 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 /* Fetch the descriptor. */
2876 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2877 if (rcStrict != VINF_SUCCESS)
2878 {
2879 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2880 VBOXSTRICTRC_VAL(rcStrict)));
2881 return rcStrict;
2882 }
2883
2884 /* SS must be a data segment and writable. */
2885 if ( !DescSS.Legacy.Gen.u1DescType
2886 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2887 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2888 {
2889 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2890 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2895 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2896 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2897 {
2898 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2899 uNewCpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Is it there? */
2904 if (!DescSS.Legacy.Gen.u1Present)
2905 {
2906 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2907 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2911 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2912
2913 /* Set the accessed bit before committing the result into SS. */
2914 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2915 {
2916 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2920 }
2921
2922 /* Commit SS. */
2923 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2924 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2925 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2926 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2927 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2928 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2929 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2930
2931 /* CPL has changed, update IEM before loading rest of segments. */
2932 IEM_SET_CPL(pVCpu, uNewCpl);
2933
2934 /*
2935 * Load the data segments for the new task.
2936 */
2937 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2941 if (rcStrict != VINF_SUCCESS)
2942 return rcStrict;
2943 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2947 if (rcStrict != VINF_SUCCESS)
2948 return rcStrict;
2949
2950 /*
2951 * Load the code segment for the new task.
2952 */
2953 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2954 {
2955 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2957 }
2958
2959 /* Fetch the descriptor. */
2960 IEMSELDESC DescCS;
2961 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2962 if (rcStrict != VINF_SUCCESS)
2963 {
2964 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2965 return rcStrict;
2966 }
2967
2968 /* CS must be a code segment. */
2969 if ( !DescCS.Legacy.Gen.u1DescType
2970 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2971 {
2972 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2973 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2974 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2975 }
2976
2977 /* For conforming CS, DPL must be less than or equal to the RPL. */
2978 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2979 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2980 {
2981            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2982 DescCS.Legacy.Gen.u2Dpl));
2983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 /* For non-conforming CS, DPL must match RPL. */
2987 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2988 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2989 {
2990            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2991 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 /* Is it there? */
2996 if (!DescCS.Legacy.Gen.u1Present)
2997 {
2998 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2999 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3003 u64Base = X86DESC_BASE(&DescCS.Legacy);
3004
3005 /* Set the accessed bit before committing the result into CS. */
3006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3007 {
3008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3009 if (rcStrict != VINF_SUCCESS)
3010 return rcStrict;
3011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3012 }
3013
3014 /* Commit CS. */
3015 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3016 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3017 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3018 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3019 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3020 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3022 }
3023
3024 /* Make sure the CPU mode is correct. */
3025 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3026 if (fExecNew != pVCpu->iem.s.fExec)
3027 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3028 pVCpu->iem.s.fExec = fExecNew;
3029
3030 /** @todo Debug trap. */
3031 if (fIsNewTss386 && fNewDebugTrap)
3032 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3033
3034 /*
3035 * Construct the error code masks based on what caused this task switch.
3036 * See Intel Instruction reference for INT.
3037 */
3038 uint16_t uExt;
3039 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3040 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3041 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3042 uExt = 1;
3043 else
3044 uExt = 0;
3045
3046 /*
3047 * Push any error code on to the new stack.
3048 */
3049 if (fFlags & IEM_XCPT_FLAGS_ERR)
3050 {
3051 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3052 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3053 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
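 /* Only the error code itself is pushed onto the new task's stack here, so the
    frame is a single 4-byte item for a 32-bit TSS and a 2-byte item for a
    16-bit one (see the iemMemStackPushU32/U16 calls below). */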
3054
3055 /* Check that there is sufficient space on the stack. */
3056 /** @todo Factor out segment limit checking for normal/expand down segments
3057 * into a separate function. */
3058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3059 {
3060 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3061 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3062 {
3063 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3065 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3066 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3067 }
3068 }
3069 else
3070 {
3071 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3072 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3073 {
3074 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3075 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3076 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3077 }
3078 }
3079
3080
3081 if (fIsNewTss386)
3082 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3083 else
3084 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3085 if (rcStrict != VINF_SUCCESS)
3086 {
3087 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3088 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090 }
3091 }
3092
3093 /* Check the new EIP against the new CS limit. */
3094 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3095 {
3096 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3097 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3098 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3099 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3100 }
3101
3102 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3103 pVCpu->cpum.GstCtx.ss.Sel));
3104 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3105}
3106
3107
3108/**
3109 * Implements exceptions and interrupts for protected mode.
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param cbInstr The number of bytes to offset rIP by in the return
3114 * address.
3115 * @param u8Vector The interrupt / exception vector number.
3116 * @param fFlags The flags.
3117 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3118 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3119 */
3120static VBOXSTRICTRC
3121iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3122 uint8_t cbInstr,
3123 uint8_t u8Vector,
3124 uint32_t fFlags,
3125 uint16_t uErr,
3126 uint64_t uCr2) RT_NOEXCEPT
3127{
3128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3129
3130 /*
3131 * Read the IDT entry.
3132 */
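 /* Each protected-mode IDT entry is 8 bytes, so vector N occupies bytes
    8*N through 8*N+7 of the IDT; the limit check below ensures the whole
    entry lies within IDTR.LIMIT. */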
3133 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3136 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
3138 X86DESC Idte;
3139 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3140 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3141 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3142 {
3143 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3144 return rcStrict;
3145 }
3146 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3147 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3148 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3149 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3150
3151 /*
3152 * Check the descriptor type, DPL and such.
3153 * ASSUMES this is done in the same order as described for call-gate calls.
3154 */
3155 if (Idte.Gate.u1DescType)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3159 }
3160 bool fTaskGate = false;
3161 uint8_t f32BitGate = true;
3162 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
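 /* These flags are always cleared in the handler's EFLAGS image; interrupt
    gates additionally clear IF (added in the switch below), which is what
    distinguishes them from trap gates. */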
3163 switch (Idte.Gate.u4Type)
3164 {
3165 case X86_SEL_TYPE_SYS_UNDEFINED:
3166 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3167 case X86_SEL_TYPE_SYS_LDT:
3168 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3169 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3170 case X86_SEL_TYPE_SYS_UNDEFINED2:
3171 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3172 case X86_SEL_TYPE_SYS_UNDEFINED3:
3173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3174 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3175 case X86_SEL_TYPE_SYS_UNDEFINED4:
3176 {
3177 /** @todo check what actually happens when the type is wrong...
3178 * esp. call gates. */
3179 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3180 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182
3183 case X86_SEL_TYPE_SYS_286_INT_GATE:
3184 f32BitGate = false;
3185 RT_FALL_THRU();
3186 case X86_SEL_TYPE_SYS_386_INT_GATE:
3187 fEflToClear |= X86_EFL_IF;
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_TASK_GATE:
3191 fTaskGate = true;
3192#ifndef IEM_IMPLEMENTS_TASKSWITCH
3193 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3194#endif
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3198 f32BitGate = false;
3199 break;
3200 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3201 break;
3202
3203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3204 }
3205
3206 /* Check DPL against CPL if applicable. */
3207 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3208 {
3209 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3213 }
3214 }
3215
3216 /* Is it there? */
3217 if (!Idte.Gate.u1Present)
3218 {
3219 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3220 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3221 }
3222
3223 /* Is it a task-gate? */
3224 if (fTaskGate)
3225 {
3226 /*
3227 * Construct the error code masks based on what caused this task switch.
3228 * See Intel Instruction reference for INT.
3229 */
3230 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3231 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3232 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3233 RTSEL SelTss = Idte.Gate.u16Sel;
3234
3235 /*
3236 * Fetch the TSS descriptor in the GDT.
3237 */
3238 IEMSELDESC DescTSS;
3239 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3240 if (rcStrict != VINF_SUCCESS)
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3243 VBOXSTRICTRC_VAL(rcStrict)));
3244 return rcStrict;
3245 }
3246
3247 /* The TSS descriptor must be a system segment and be available (not busy). */
3248 if ( DescTSS.Legacy.Gen.u1DescType
3249 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3250 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3251 {
3252 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3253 u8Vector, SelTss, DescTSS.Legacy.au64));
3254 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3255 }
3256
3257 /* The TSS must be present. */
3258 if (!DescTSS.Legacy.Gen.u1Present)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* Do the actual task switch. */
3265 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3266 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3267 fFlags, uErr, uCr2, SelTss, &DescTSS);
3268 }
3269
3270 /* A null CS is bad. */
3271 RTSEL NewCS = Idte.Gate.u16Sel;
3272 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3275 return iemRaiseGeneralProtectionFault0(pVCpu);
3276 }
3277
3278 /* Fetch the descriptor for the new CS. */
3279 IEMSELDESC DescCS;
3280 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3281 if (rcStrict != VINF_SUCCESS)
3282 {
3283 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3284 return rcStrict;
3285 }
3286
3287 /* Must be a code segment. */
3288 if (!DescCS.Legacy.Gen.u1DescType)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3291 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3292 }
3293 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3294 {
3295 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3296 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3297 }
3298
3299 /* Don't allow lowering the privilege level. */
3300 /** @todo Does the lowering of privileges apply to software interrupts
3301 * only? This has bearings on the more-privileged or
3302 * same-privilege stack behavior further down. A testcase would
3303 * be nice. */
3304 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3305 {
3306 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3307 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3308 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3309 }
3310
3311 /* Make sure the selector is present. */
3312 if (!DescCS.Legacy.Gen.u1Present)
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3315 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3316 }
3317
3318#ifdef LOG_ENABLED
3319 /* If this is a software interrupt, try to decode it if logging is enabled and such. */
3320 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3321 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3322 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3323#endif
3324
3325 /* Check the new EIP against the new CS limit. */
3326 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3327 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3328 ? Idte.Gate.u16OffsetLow
3329 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
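 /* 286-style gates only carry a 16-bit offset; 386-style gates combine the
    low and high 16-bit offset halves into the full 32-bit entry point. */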
3330 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3331 if (uNewEip > cbLimitCS)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3334 u8Vector, uNewEip, cbLimitCS, NewCS));
3335 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3336 }
3337 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3338
3339 /* Calc the flag image to push. */
3340 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3341 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3342 fEfl &= ~X86_EFL_RF;
3343 else
3344 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3345
3346 /* From V8086 mode only go to CPL 0. */
3347 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3348 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3349 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3350 {
3351 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3352 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3353 }
3354
3355 /*
3356 * If the privilege level changes, we need to get a new stack from the TSS.
3357 * This in turn means validating the new SS and ESP...
3358 */
3359 if (uNewCpl != IEM_GET_CPL(pVCpu))
3360 {
3361 RTSEL NewSS;
3362 uint32_t uNewEsp;
3363 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3364 if (rcStrict != VINF_SUCCESS)
3365 return rcStrict;
3366
3367 IEMSELDESC DescSS;
3368 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3369 if (rcStrict != VINF_SUCCESS)
3370 return rcStrict;
3371 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3372 if (!DescSS.Legacy.Gen.u1DefBig)
3373 {
3374 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3375 uNewEsp = (uint16_t)uNewEsp;
3376 }
3377
3378 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3379
3380 /* Check that there is sufficient space for the stack frame. */
3381 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3382 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3383 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3384 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
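 /* The non-V86 frame holds EIP, CS, EFLAGS, ESP and SS (5 entries, plus the
    error code when present); the V86 frame additionally holds ES, DS, FS and
    GS (9 entries). Entries are 2 bytes for a 16-bit gate and 4 bytes for a
    32-bit one, hence the shift by f32BitGate. */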
3385
3386 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3387 {
3388 if ( uNewEsp - 1 > cbLimitSS
3389 || uNewEsp < cbStackFrame)
3390 {
3391 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3392 u8Vector, NewSS, uNewEsp, cbStackFrame));
3393 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3394 }
3395 }
3396 else
3397 {
3398 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3399 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3400 {
3401 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3402 u8Vector, NewSS, uNewEsp, cbStackFrame));
3403 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3404 }
3405 }
3406
3407 /*
3408 * Start making changes.
3409 */
3410
3411 /* Set the new CPL so that stack accesses use it. */
3412 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3413 IEM_SET_CPL(pVCpu, uNewCpl);
3414
3415 /* Create the stack frame. */
3416 uint8_t bUnmapInfoStackFrame;
3417 RTPTRUNION uStackFrame;
3418 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3419 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3420 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3421 if (rcStrict != VINF_SUCCESS)
3422 return rcStrict;
3423 if (f32BitGate)
3424 {
3425 if (fFlags & IEM_XCPT_FLAGS_ERR)
3426 *uStackFrame.pu32++ = uErr;
3427 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3428 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3429 uStackFrame.pu32[2] = fEfl;
3430 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3431 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3432 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3436 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3437 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3438 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3439 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3440 }
3441 }
3442 else
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu16++ = uErr;
3446 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3447 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3448 uStackFrame.pu16[2] = fEfl;
3449 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3450 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3451 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3455 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3456 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3457 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3458 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3459 }
3460 }
3461 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3462 if (rcStrict != VINF_SUCCESS)
3463 return rcStrict;
3464
3465 /* Mark the selectors 'accessed' (hope this is the correct time). */
3466 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3467 * after pushing the stack frame? (Write protect the gdt + stack to
3468 * find out.) */
3469 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3470 {
3471 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3472 if (rcStrict != VINF_SUCCESS)
3473 return rcStrict;
3474 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3475 }
3476
3477 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3478 {
3479 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3480 if (rcStrict != VINF_SUCCESS)
3481 return rcStrict;
3482 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3483 }
3484
3485 /*
3486 * Start committing the register changes (joins with the DPL=CPL branch).
3487 */
3488 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3489 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3491 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3492 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3493 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3494 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3495 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3496 * SP is loaded).
3497 * Need to check the other combinations too:
3498 * - 16-bit TSS, 32-bit handler
3499 * - 32-bit TSS, 16-bit handler */
3500 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3501 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3502 else
3503 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3504
3505 if (fEfl & X86_EFL_VM)
3506 {
3507 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3508 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3510 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3511 }
3512 }
3513 /*
3514 * Same privilege, no stack change and smaller stack frame.
3515 */
3516 else
3517 {
3518 uint64_t uNewRsp;
3519 uint8_t bUnmapInfoStackFrame;
3520 RTPTRUNION uStackFrame;
3521 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
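 /* No stack switch here, so only EIP, CS and EFLAGS (plus the optional error
    code) are pushed: 3 or 4 entries of 2 or 4 bytes depending on the gate size. */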
3522 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3523 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 if (f32BitGate)
3528 {
3529 if (fFlags & IEM_XCPT_FLAGS_ERR)
3530 *uStackFrame.pu32++ = uErr;
3531 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3532 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3533 uStackFrame.pu32[2] = fEfl;
3534 }
3535 else
3536 {
3537 if (fFlags & IEM_XCPT_FLAGS_ERR)
3538 *uStackFrame.pu16++ = uErr;
3539 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3540 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3541 uStackFrame.pu16[2] = fEfl;
3542 }
3543 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3544 if (rcStrict != VINF_SUCCESS)
3545 return rcStrict;
3546
3547 /* Mark the CS selector as 'accessed'. */
3548 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3549 {
3550 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3554 }
3555
3556 /*
3557 * Start committing the register changes (joins with the other branch).
3558 */
3559 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3560 }
3561
3562 /* ... register committing continues. */
3563 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3564 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3565 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3566 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3567 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3569
3570 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3571 fEfl &= ~fEflToClear;
3572 IEMMISC_SET_EFL(pVCpu, fEfl);
3573
3574 if (fFlags & IEM_XCPT_FLAGS_CR2)
3575 pVCpu->cpum.GstCtx.cr2 = uCr2;
3576
3577 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3578 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3579
3580 /* Make sure the execution flags are correct. */
3581 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3582 if (fExecNew != pVCpu->iem.s.fExec)
3583 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3584 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3585 pVCpu->iem.s.fExec = fExecNew;
3586 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3587
3588 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3589}
3590
3591
3592/**
3593 * Implements exceptions and interrupts for long mode.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param cbInstr The number of bytes to offset rIP by in the return
3598 * address.
3599 * @param u8Vector The interrupt / exception vector number.
3600 * @param fFlags The flags.
3601 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3602 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3603 */
3604static VBOXSTRICTRC
3605iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3606 uint8_t cbInstr,
3607 uint8_t u8Vector,
3608 uint32_t fFlags,
3609 uint16_t uErr,
3610 uint64_t uCr2) RT_NOEXCEPT
3611{
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614 /*
3615 * Read the IDT entry.
3616 */
3617 uint16_t offIdt = (uint16_t)u8Vector << 4;
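 /* Long-mode IDT descriptors are 16 bytes, hence vector * 16; the entry is
    fetched below as two 8-byte reads. */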
3618 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3621 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3622 }
3623 X86DESC64 Idte;
3624#ifdef _MSC_VER /* Shut up silly compiler warning. */
3625 Idte.au64[0] = 0;
3626 Idte.au64[1] = 0;
3627#endif
3628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3630 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3631 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3632 {
3633 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3634 return rcStrict;
3635 }
3636 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3637 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3638 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3639
3640 /*
3641 * Check the descriptor type, DPL and such.
3642 * ASSUMES this is done in the same order as described for call-gate calls.
3643 */
3644 if (Idte.Gate.u1DescType)
3645 {
3646 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3647 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3648 }
3649 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3650 switch (Idte.Gate.u4Type)
3651 {
3652 case AMD64_SEL_TYPE_SYS_INT_GATE:
3653 fEflToClear |= X86_EFL_IF;
3654 break;
3655 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3656 break;
3657
3658 default:
3659 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3660 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3661 }
3662
3663 /* Check DPL against CPL if applicable. */
3664 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3665 {
3666 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3667 {
3668 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3669 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3670 }
3671 }
3672
3673 /* Is it there? */
3674 if (!Idte.Gate.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3677 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3678 }
3679
3680 /* A null CS is bad. */
3681 RTSEL NewCS = Idte.Gate.u16Sel;
3682 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3683 {
3684 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3685 return iemRaiseGeneralProtectionFault0(pVCpu);
3686 }
3687
3688 /* Fetch the descriptor for the new CS. */
3689 IEMSELDESC DescCS;
3690 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3691 if (rcStrict != VINF_SUCCESS)
3692 {
3693 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3694 return rcStrict;
3695 }
3696
3697 /* Must be a 64-bit code segment. */
3698 if (!DescCS.Long.Gen.u1DescType)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703 if ( !DescCS.Long.Gen.u1Long
3704 || DescCS.Long.Gen.u1DefBig
3705 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3708 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3709 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3710 }
3711
3712 /* Don't allow lowering the privilege level. For non-conforming CS
3713 selectors, the CS.DPL sets the privilege level the trap/interrupt
3714 handler runs at. For conforming CS selectors, the CPL remains
3715 unchanged, but the CS.DPL must be <= CPL. */
3716 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3717 * when CPU in Ring-0. Result \#GP? */
3718 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3719 {
3720 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3721 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3722 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3723 }
3724
3725
3726 /* Make sure the selector is present. */
3727 if (!DescCS.Legacy.Gen.u1Present)
3728 {
3729 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3730 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3731 }
3732
3733 /* Check that the new RIP is canonical. */
3734 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3735 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3736 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3737 if (!IEM_IS_CANONICAL(uNewRip))
3738 {
3739 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3740 return iemRaiseGeneralProtectionFault0(pVCpu);
3741 }
3742
3743 /*
3744 * If the privilege level changes or if the IST isn't zero, we need to get
3745 * a new stack from the TSS.
3746 */
3747 uint64_t uNewRsp;
3748 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3749 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3750 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3751 || Idte.Gate.u3IST != 0)
3752 {
3753 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3754 if (rcStrict != VINF_SUCCESS)
3755 return rcStrict;
3756 }
3757 else
3758 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3759 uNewRsp &= ~(uint64_t)0xf;
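 /* In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary
    before pushing the interrupt stack frame; the masking above emulates that. */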
3760
3761 /*
3762 * Calc the flag image to push.
3763 */
3764 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3765 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3766 fEfl &= ~X86_EFL_RF;
3767 else
3768 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3769
3770 /*
3771 * Start making changes.
3772 */
3773 /* Set the new CPL so that stack accesses use it. */
3774 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3775 IEM_SET_CPL(pVCpu, uNewCpl);
3776/** @todo Setting CPL this early seems wrong as it would affect any errors we
3777 * raise accessing the stack and (?) GDT/LDT... */
3778
3779 /* Create the stack frame. */
3780 uint8_t bUnmapInfoStackFrame;
3781 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
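 /* The long-mode frame is always RIP, CS, RFLAGS, RSP and SS (5 qwords), with
    one extra qword when an error code is pushed. */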
3782 RTPTRUNION uStackFrame;
3783 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3784 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 if (fFlags & IEM_XCPT_FLAGS_ERR)
3789 *uStackFrame.pu64++ = uErr;
3790 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3791 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3792 uStackFrame.pu64[2] = fEfl;
3793 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3794 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3795 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3796 if (rcStrict != VINF_SUCCESS)
3797 return rcStrict;
3798
3799 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3800 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3801 * after pushing the stack frame? (Write protect the gdt + stack to
3802 * find out.) */
3803 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3804 {
3805 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3806 if (rcStrict != VINF_SUCCESS)
3807 return rcStrict;
3808 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3809 }
3810
3811 /*
3812 * Start committing the register changes.
3813 */
3814 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3815 * hidden registers when interrupting 32-bit or 16-bit code! */
3816 if (uNewCpl != uOldCpl)
3817 {
3818 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3819 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3820 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3821 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3822 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3823 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3824 }
3825 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3826 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3827 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3828 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3829 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3830 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3831 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3832 pVCpu->cpum.GstCtx.rip = uNewRip;
3833
3834 fEfl &= ~fEflToClear;
3835 IEMMISC_SET_EFL(pVCpu, fEfl);
3836
3837 if (fFlags & IEM_XCPT_FLAGS_CR2)
3838 pVCpu->cpum.GstCtx.cr2 = uCr2;
3839
3840 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3841 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3842
3843 iemRecalcExecModeAndCplFlags(pVCpu);
3844
3845 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3846}
3847
3848
3849/**
3850 * Implements exceptions and interrupts.
3851 *
3852 * All exceptions and interrupts go through this function!
3853 *
3854 * @returns VBox strict status code.
3855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3856 * @param cbInstr The number of bytes to offset rIP by in the return
3857 * address.
3858 * @param u8Vector The interrupt / exception vector number.
3859 * @param fFlags The flags.
3860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3862 */
3863VBOXSTRICTRC
3864iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3865 uint8_t cbInstr,
3866 uint8_t u8Vector,
3867 uint32_t fFlags,
3868 uint16_t uErr,
3869 uint64_t uCr2) RT_NOEXCEPT
3870{
3871 /*
3872 * Get all the state that we might need here.
3873 */
3874 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3876
3877#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3878 /*
3879 * Flush prefetch buffer
3880 */
3881 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3882#endif
3883
3884 /*
3885 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3886 */
3887 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3888 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3889 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3890 | IEM_XCPT_FLAGS_BP_INSTR
3891 | IEM_XCPT_FLAGS_ICEBP_INSTR
3892 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3893 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3894 {
3895 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3897 u8Vector = X86_XCPT_GP;
3898 uErr = 0;
3899 }
3900
3901 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3902#ifdef DBGFTRACE_ENABLED
3903 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3904 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3906#endif
3907
3908 /*
3909 * Check if DBGF wants to intercept the exception.
3910 */
3911 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3912 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3913 { /* likely */ }
3914 else
3915 {
3916 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3917 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3918 if (rcStrict != VINF_SUCCESS)
3919 return rcStrict;
3920 }
3921
3922 /*
3923 * Evaluate whether NMI blocking should be in effect.
3924 * Normally, NMI blocking is in effect whenever we inject an NMI.
3925 */
3926 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3927 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3928
3929#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3930 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3931 {
3932 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3933 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3934 return rcStrict0;
3935
3936 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3937 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3938 {
3939 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3940 fBlockNmi = false;
3941 }
3942 }
3943#endif
3944
3945#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3946 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3947 {
3948 /*
3949 * If the event is being injected as part of VMRUN, it isn't subject to event
3950 * intercepts in the nested-guest. However, secondary exceptions that occur
3951 * during injection of any event -are- subject to exception intercepts.
3952 *
3953 * See AMD spec. 15.20 "Event Injection".
3954 */
3955 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3956 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3957 else
3958 {
3959 /*
3960 * Check and handle if the event being raised is intercepted.
3961 */
3962 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3963 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3964 return rcStrict0;
3965 }
3966 }
3967#endif
3968
3969 /*
3970 * Set NMI blocking if necessary.
3971 */
3972 if (fBlockNmi)
3973 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3974
3975 /*
3976 * Do recursion accounting.
3977 */
3978 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3979 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3980 if (pVCpu->iem.s.cXcptRecursions == 0)
3981 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3982 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3983 else
3984 {
3985 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3986 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3987 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3988
3989 if (pVCpu->iem.s.cXcptRecursions >= 4)
3990 {
3991#ifdef DEBUG_bird
3992 AssertFailed();
3993#endif
3994 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3995 }
3996
3997 /*
3998 * Evaluate the sequence of recurring events.
3999 */
4000 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4001 NULL /* pXcptRaiseInfo */);
4002 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4003 { /* likely */ }
4004 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4005 {
4006 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4007 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4008 u8Vector = X86_XCPT_DF;
4009 uErr = 0;
4010#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4011 /* VMX nested-guest #DF intercept needs to be checked here. */
4012 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4013 {
4014 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4015 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4016 return rcStrict0;
4017 }
4018#endif
4019 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4020 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4021 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4022 }
4023 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4024 {
4025 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4026 return iemInitiateCpuShutdown(pVCpu);
4027 }
4028 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4029 {
4030 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4031 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4032 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4033 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4034 return VERR_EM_GUEST_CPU_HANG;
4035 }
4036 else
4037 {
4038 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4039 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4040 return VERR_IEM_IPE_9;
4041 }
4042
4043 /*
4044 * The 'EXT' bit is set when an exception occurs during delivery of an external
4045 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4046 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
4047 * software interrupt instructions INT n, INTO and INT3, the 'EXT' bit is not set[3].
4048 *
4049 * [1] - Intel spec. 6.13 "Error Code"
4050 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4051 * [3] - Intel Instruction reference for INT n.
4052 */
4053 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4054 && (fFlags & IEM_XCPT_FLAGS_ERR)
4055 && u8Vector != X86_XCPT_PF
4056 && u8Vector != X86_XCPT_DF)
4057 {
4058 uErr |= X86_TRAP_ERR_EXTERNAL;
4059 }
4060 }
4061
4062 pVCpu->iem.s.cXcptRecursions++;
4063 pVCpu->iem.s.uCurXcpt = u8Vector;
4064 pVCpu->iem.s.fCurXcpt = fFlags;
4065 pVCpu->iem.s.uCurXcptErr = uErr;
4066 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4067
4068 /*
4069 * Extensive logging.
4070 */
4071#if defined(LOG_ENABLED) && defined(IN_RING3)
4072 if (LogIs3Enabled())
4073 {
4074 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4075 char szRegs[4096];
4076 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4077 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4078 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4079 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4080 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4081 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4082 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4083 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4084 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4085 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4086 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4087 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4088 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4089 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4090 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4091 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4092 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4093 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4094 " efer=%016VR{efer}\n"
4095 " pat=%016VR{pat}\n"
4096 " sf_mask=%016VR{sf_mask}\n"
4097 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4098 " lstar=%016VR{lstar}\n"
4099 " star=%016VR{star} cstar=%016VR{cstar}\n"
4100 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4101 );
4102
4103 char szInstr[256];
4104 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4105 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4106 szInstr, sizeof(szInstr), NULL);
4107 Log3(("%s%s\n", szRegs, szInstr));
4108 }
4109#endif /* LOG_ENABLED */
4110
4111 /*
4112 * Stats.
4113 */
4114 uint64_t const uTimestamp = ASMReadTSC();
4115 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4116 {
4117 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4118 EMHistoryAddExit(pVCpu,
4119 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4120 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4121 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4122 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4123 }
4124 else
4125 {
4126 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4127 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4128 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4129 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4130 if (fFlags & IEM_XCPT_FLAGS_ERR)
4131 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4132 if (fFlags & IEM_XCPT_FLAGS_CR2)
4133 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4134 }
4135
4136 /*
4137 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4138 * to ensure that a stale TLB or paging cache entry will only cause one
4139 * spurious #PF.
4140 */
4141 if ( u8Vector == X86_XCPT_PF
4142 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4143 IEMTlbInvalidatePage(pVCpu, uCr2);
4144
4145 /*
4146 * Call the mode specific worker function.
4147 */
4148 VBOXSTRICTRC rcStrict;
4149 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4150 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4151 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4152 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4153 else
4154 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4155
4156 /* Flush the prefetch buffer. */
4157 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4158
4159 /*
4160 * Unwind.
4161 */
4162 pVCpu->iem.s.cXcptRecursions--;
4163 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4164 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4165 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4166 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4167 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4168 return rcStrict;
4169}
4170
4171#ifdef IEM_WITH_SETJMP
4172/**
4173 * See iemRaiseXcptOrInt. Will not return.
4174 */
4175DECL_NO_RETURN(void)
4176iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4177 uint8_t cbInstr,
4178 uint8_t u8Vector,
4179 uint32_t fFlags,
4180 uint16_t uErr,
4181 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4182{
4183 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4184 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4185}
4186#endif
4187
4188
4189/** \#DE - 00. */
4190VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4191{
4192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4193}
4194
4195
4196#ifdef IEM_WITH_SETJMP
4197/** \#DE - 00. */
4198DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4199{
4200 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4201}
4202#endif
4203
4204
4205/** \#DB - 01.
4206 * @note This automatically clears DR7.GD. */
4207VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4208{
4209 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4210 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4212}
4213
4214
4215/** \#BR - 05. */
4216VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4217{
4218 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4219}
4220
4221
4222/** \#UD - 06. */
4223VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4224{
4225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4226}
4227
4228
4229#ifdef IEM_WITH_SETJMP
4230/** \#UD - 06. */
4231DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4232{
4233 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4234}
4235#endif
4236
4237
4238/** \#NM - 07. */
4239VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4240{
4241 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4242}
4243
4244
4245#ifdef IEM_WITH_SETJMP
4246/** \#NM - 07. */
4247DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4248{
4249 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4250}
4251#endif
4252
4253
4254/** \#TS(err) - 0a. */
4255VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4256{
4257 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4258}
4259
4260
4261/** \#TS(tr) - 0a. */
4262VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4263{
4264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4265 pVCpu->cpum.GstCtx.tr.Sel, 0);
4266}
4267
4268
4269/** \#TS(0) - 0a. */
4270VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4271{
4272 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4273 0, 0);
4274}
4275
4276
4277/** \#TS(err) - 0a. */
4278VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4279{
4280 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4281 uSel & X86_SEL_MASK_OFF_RPL, 0);
4282}
4283
4284
4285/** \#NP(err) - 0b. */
4286VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4287{
4288 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4289}
4290
4291
4292/** \#NP(sel) - 0b. */
4293VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4294{
4295 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4296 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4297 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4298 uSel & ~X86_SEL_RPL, 0);
4299}
4300
4301
4302/** \#SS(seg) - 0c. */
4303VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4304{
4305 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4306 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4307 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4308 uSel & ~X86_SEL_RPL, 0);
4309}
4310
4311
4312/** \#SS(err) - 0c. */
4313VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4314{
4315 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4316 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4317 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4318}
4319
4320
4321/** \#GP(n) - 0d. */
4322VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4323{
4324 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4325 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4326}
4327
4328
4329/** \#GP(0) - 0d. */
4330VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4331{
4332 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4333 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4334}
4335
4336#ifdef IEM_WITH_SETJMP
4337/** \#GP(0) - 0d. */
4338DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4339{
4340 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4341 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4342}
4343#endif
4344
4345
4346/** \#GP(sel) - 0d. */
4347VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4348{
4349 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4350 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4352 Sel & ~X86_SEL_RPL, 0);
4353}
4354
4355
4356/** \#GP(0) - 0d. */
4357VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4358{
4359 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4360 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4361}
4362
4363
4364/** \#GP(sel) - 0d. */
4365VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4366{
4367 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4368 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4369 NOREF(iSegReg); NOREF(fAccess);
4370 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4371 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4372}
4373
4374#ifdef IEM_WITH_SETJMP
4375/** \#GP(sel) - 0d, longjmp. */
4376DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4377{
4378 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4379 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4380 NOREF(iSegReg); NOREF(fAccess);
4381 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4382 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4383}
4384#endif
4385
4386/** \#GP(sel) - 0d. */
4387VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4388{
4389 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4390 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4391 NOREF(Sel);
4392 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4393}
4394
4395#ifdef IEM_WITH_SETJMP
4396/** \#GP(sel) - 0d, longjmp. */
4397DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4398{
4399 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4400 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4401 NOREF(Sel);
4402 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4403}
4404#endif
4405
4406
4407/** \#GP(sel) - 0d. */
4408VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4409{
4410 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4411 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4412 NOREF(iSegReg); NOREF(fAccess);
4413 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4414}
4415
4416#ifdef IEM_WITH_SETJMP
4417/** \#GP(sel) - 0d, longjmp. */
4418DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4419{
4420 NOREF(iSegReg); NOREF(fAccess);
4421 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4422}
4423#endif
4424
4425
4426/** \#PF(n) - 0e. */
4427VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4428{
4429 uint16_t uErr;
4430 switch (rc)
4431 {
4432 case VERR_PAGE_NOT_PRESENT:
4433 case VERR_PAGE_TABLE_NOT_PRESENT:
4434 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4435 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4436 uErr = 0;
4437 break;
4438
4439 default:
4440 AssertMsgFailed(("%Rrc\n", rc));
4441 RT_FALL_THRU();
4442 case VERR_ACCESS_DENIED:
4443 uErr = X86_TRAP_PF_P;
4444 break;
4445
4446 /** @todo reserved */
4447 }
4448
4449 if (IEM_GET_CPL(pVCpu) == 3)
4450 uErr |= X86_TRAP_PF_US;
4451
4452 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4453 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4454 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4455 uErr |= X86_TRAP_PF_ID;
4456
4457#if 0 /* This is so much non-sense, really. Why was it done like that? */
4458 /* Note! RW access callers reporting a WRITE protection fault, will clear
4459 the READ flag before calling. So, read-modify-write accesses (RW)
4460 can safely be reported as READ faults. */
4461 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4462 uErr |= X86_TRAP_PF_RW;
4463#else
4464 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4465 {
4466 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4467 /// (regardless of outcome of the comparison in the latter case).
4468 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4469 uErr |= X86_TRAP_PF_RW;
4470 }
4471#endif
4472
4473 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4474 of the memory operand rather than at the start of it. (Not sure what
4475 happens if it crosses a page boundary.) The current heuristic for
4476 this is to report the #PF for the last byte if the access is more than
4477 64 bytes. This is probably not correct, but we can work that out later;
4478 the main objective now is to get FXSAVE to work like on real hardware and
4479 make bs3-cpu-basic2 work. */
4480 if (cbAccess <= 64)
4481 { /* likely */ }
4482 else
4483 GCPtrWhere += cbAccess - 1;
4484
4485 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4486 uErr, GCPtrWhere);
4487}
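
/* The error code assembled above follows the architectural #PF error code
   layout: bit 0 = P (protection violation vs. not-present), bit 1 = W/R,
   bit 2 = U/S and bit 4 = I/D.  The sketch below (not built; the helper name
   is made up purely for illustration) shows how a few of the combinations
   used in this function come about. */
#if 0
/** Example: a ring-3 write to a present, read-only page yields
 *  X86_TRAP_PF_P | X86_TRAP_PF_RW | X86_TRAP_PF_US, while a supervisor
 *  instruction fetch from a not-present page with NX active yields just
 *  X86_TRAP_PF_ID. */
static uint16_t iemExamplePageFaultErrCode(bool fProtViolation, bool fWrite, bool fUser, bool fInstrFetch)
{
    uint16_t uErr = fProtViolation ? X86_TRAP_PF_P : 0;
    if (fWrite)
        uErr |= X86_TRAP_PF_RW;
    if (fUser)
        uErr |= X86_TRAP_PF_US;
    if (fInstrFetch)
        uErr |= X86_TRAP_PF_ID;
    return uErr;
}
#endif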
4488
4489#ifdef IEM_WITH_SETJMP
4490/** \#PF(n) - 0e, longjmp. */
4491DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4492 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4493{
4494 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4495}
4496#endif
4497
4498
4499/** \#MF(0) - 10. */
4500VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4501{
4502 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4503 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4504
4505 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4506 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4507 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4508}
4509
4510#ifdef IEM_WITH_SETJMP
4511/** \#MF(0) - 10, longjmp. */
4512DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4513{
4514 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4515}
4516#endif
4517
4518
4519/** \#AC(0) - 11. */
4520VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4521{
4522 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4523}
4524
4525#ifdef IEM_WITH_SETJMP
4526/** \#AC(0) - 11, longjmp. */
4527DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4528{
4529 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4530}
4531#endif
4532
4533
4534/** \#XF(0)/\#XM(0) - 19. */
4535VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4536{
4537 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4538}
4539
4540
4541#ifdef IEM_WITH_SETJMP
4542/** \#XF(0)/\#XM(0) - 19, longjmp. */
4543DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4544{
4545 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4546}
4547#endif
4548
4549
4550/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4551IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4552{
4553 NOREF(cbInstr);
4554 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4555}
4556
4557
4558/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4559IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4560{
4561 NOREF(cbInstr);
4562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4563}
4564
4565
4566/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4567IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4568{
4569 NOREF(cbInstr);
4570 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4571}
4572
4573
4574/** @} */
4575
4576/** @name Common opcode decoders.
4577 * @{
4578 */
4579//#include <iprt/mem.h>
4580
4581/**
4582 * Used to add extra details about a stub case.
4583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4584 */
4585void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4586{
4587#if defined(LOG_ENABLED) && defined(IN_RING3)
4588 PVM pVM = pVCpu->CTX_SUFF(pVM);
4589 char szRegs[4096];
4590 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4591 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4592 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4593 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4594 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4595 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4596 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4597 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4598 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4599 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4600 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4601 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4602 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4603 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4604 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4605 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4606 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4607 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4608 " efer=%016VR{efer}\n"
4609 " pat=%016VR{pat}\n"
4610 " sf_mask=%016VR{sf_mask}\n"
4611 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4612 " lstar=%016VR{lstar}\n"
4613 " star=%016VR{star} cstar=%016VR{cstar}\n"
4614 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4615 );
4616
4617 char szInstr[256];
4618 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4619 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4620 szInstr, sizeof(szInstr), NULL);
4621
4622 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4623#else
4624 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4625#endif
4626}
4627
4628/** @} */
4629
4630
4631
4632/** @name Register Access.
4633 * @{
4634 */
4635
4636/**
4637 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4638 *
4639 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4640 * segment limit.
4641 *
4642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4643 * @param cbInstr Instruction size.
4644 * @param offNextInstr The offset of the next instruction.
4645 * @param enmEffOpSize Effective operand size.
4646 */
4647VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4648 IEMMODE enmEffOpSize) RT_NOEXCEPT
4649{
4650 switch (enmEffOpSize)
4651 {
4652 case IEMMODE_16BIT:
4653 {
4654 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4655 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4656 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4657 pVCpu->cpum.GstCtx.rip = uNewIp;
4658 else
4659 return iemRaiseGeneralProtectionFault0(pVCpu);
4660 break;
4661 }
4662
4663 case IEMMODE_32BIT:
4664 {
4665 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4666 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4667
4668 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4669 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4670 pVCpu->cpum.GstCtx.rip = uNewEip;
4671 else
4672 return iemRaiseGeneralProtectionFault0(pVCpu);
4673 break;
4674 }
4675
4676 case IEMMODE_64BIT:
4677 {
4678 Assert(IEM_IS_64BIT_CODE(pVCpu));
4679
4680 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4681 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4682 pVCpu->cpum.GstCtx.rip = uNewRip;
4683 else
4684 return iemRaiseGeneralProtectionFault0(pVCpu);
4685 break;
4686 }
4687
4688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4689 }
4690
4691#ifndef IEM_WITH_CODE_TLB
4692 /* Flush the prefetch buffer. */
4693 pVCpu->iem.s.cbOpcode = cbInstr;
4694#endif
4695
4696 /*
4697 * Clear RF and finish the instruction (maybe raise #DB).
4698 */
4699 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4700}
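
/* In 16-bit code the jump target above is computed in 16-bit arithmetic, so
   it wraps modulo 64KiB before being checked against the CS limit.  A quick
   sketch of that wrap (not built; values and the helper name are made up for
   illustration): */
#if 0
static void iemExampleIp16Wrap(void)
{
    uint16_t const uIp     = UINT16_C(0xfffe);  /* current IP */
    uint8_t  const cbInstr = 2;                 /* instruction length */
    int8_t   const offRel  = 3;                 /* rel8 displacement */
    uint16_t const uNewIp  = (uint16_t)(uIp + cbInstr + (int16_t)offRel);
    Assert(uNewIp == UINT16_C(0x0003));         /* wrapped; only then compared to cs.u32Limit */
}
#endif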
4701
4702
4703/**
4704 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4705 *
4706 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4707 * segment limit.
4708 *
4709 * @returns Strict VBox status code.
4710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4711 * @param cbInstr Instruction size.
4712 * @param offNextInstr The offset of the next instruction.
4713 */
4714VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4715{
4716 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4717
4718 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4719 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4720 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4721 pVCpu->cpum.GstCtx.rip = uNewIp;
4722 else
4723 return iemRaiseGeneralProtectionFault0(pVCpu);
4724
4725#ifndef IEM_WITH_CODE_TLB
4726 /* Flush the prefetch buffer. */
4727 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4728#endif
4729
4730 /*
4731 * Clear RF and finish the instruction (maybe raise #DB).
4732 */
4733 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4734}
4735
4736
4737/**
4738 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4739 *
4740 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4741 * segment limit.
4742 *
4743 * @returns Strict VBox status code.
4744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4745 * @param cbInstr Instruction size.
4746 * @param offNextInstr The offset of the next instruction.
4747 * @param enmEffOpSize Effective operand size.
4748 */
4749VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4750 IEMMODE enmEffOpSize) RT_NOEXCEPT
4751{
4752 if (enmEffOpSize == IEMMODE_32BIT)
4753 {
4754 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4755
4756 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4757 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4758 pVCpu->cpum.GstCtx.rip = uNewEip;
4759 else
4760 return iemRaiseGeneralProtectionFault0(pVCpu);
4761 }
4762 else
4763 {
4764 Assert(enmEffOpSize == IEMMODE_64BIT);
4765
4766 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4767 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4768 pVCpu->cpum.GstCtx.rip = uNewRip;
4769 else
4770 return iemRaiseGeneralProtectionFault0(pVCpu);
4771 }
4772
4773#ifndef IEM_WITH_CODE_TLB
4774 /* Flush the prefetch buffer. */
4775 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4776#endif
4777
4778 /*
4779 * Clear RF and finish the instruction (maybe raise #DB).
4780 */
4781 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4782}
4783
4784/** @} */
4785
4786
4787/** @name FPU access and helpers.
4788 *
4789 * @{
4790 */
4791
4792/**
4793 * Updates the x87.DS and FPUDP registers.
4794 *
4795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4796 * @param pFpuCtx The FPU context.
4797 * @param iEffSeg The effective segment register.
4798 * @param GCPtrEff The effective address relative to @a iEffSeg.
4799 */
4800DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4801{
4802 RTSEL sel;
4803 switch (iEffSeg)
4804 {
4805 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4806 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4807 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4808 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4809 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4810 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4811 default:
4812 AssertMsgFailed(("%d\n", iEffSeg));
4813 sel = pVCpu->cpum.GstCtx.ds.Sel;
4814 }
4815 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4816 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4817 {
4818 pFpuCtx->DS = 0;
4819 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4820 }
4821 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4822 {
4823 pFpuCtx->DS = sel;
4824 pFpuCtx->FPUDP = GCPtrEff;
4825 }
4826 else
4827 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4828}
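
/* In real and V86 mode the data pointer is recorded as a linear address
   (selector * 16 + offset) with the DS field zeroed, mirroring what the
   real-mode state-save image encodes.  Small worked example (not built;
   values and the helper name are made up for illustration): */
#if 0
static void iemExampleRealModeFpuDp(void)
{
    uint16_t const uSel   = UINT16_C(0x1234);
    uint32_t const offEff = UINT32_C(0x0010);
    uint32_t const uFpuDp = offEff + ((uint32_t)uSel << 4);
    Assert(uFpuDp == UINT32_C(0x00012350));
}
#endif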
4829
4830
4831/**
4832 * Rotates the stack registers in the push direction.
4833 *
4834 * @param pFpuCtx The FPU context.
4835 * @remarks This is a complete waste of time, but fxsave stores the registers in
4836 * stack order.
4837 */
4838DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4839{
4840 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4841 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4842 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4843 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4844 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4845 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4846 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4847 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4848 pFpuCtx->aRegs[0].r80 = r80Tmp;
4849}
4850
4851
4852/**
4853 * Rotates the stack registers in the pop direction.
4854 *
4855 * @param pFpuCtx The FPU context.
4856 * @remarks This is a complete waste of time, but fxsave stores the registers in
4857 * stack order.
4858 */
4859DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4860{
4861 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4862 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4863 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4864 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4865 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4866 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4867 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4868 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4869 pFpuCtx->aRegs[7].r80 = r80Tmp;
4870}
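
/* As the remarks above note, aRegs[] is kept in ST(i) order (relative to
   FSW.TOP), the same layout FXSAVE uses, which is why a push or pop has to
   physically rotate all eight entries.  The mapping from a relative ST(i)
   index to the fixed physical register number is the one used throughout
   this file (sketch only; the helper name is made up): */
#if 0
static unsigned iemExamplePhysicalFpuRegNo(uint16_t fFsw, unsigned iStReg)
{
    return (X86_FSW_TOP_GET(fFsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif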
4871
4872
4873/**
4874 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4875 * exception prevents it.
4876 *
4877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4878 * @param pResult The FPU operation result to push.
4879 * @param pFpuCtx The FPU context.
4880 */
4881static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4882{
4883 /* Update FSW and bail if there are pending exceptions afterwards. */
4884 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4885 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4886 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4887 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4888 {
4889 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4890 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4891 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4892 pFpuCtx->FSW = fFsw;
4893 return;
4894 }
4895
4896 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4897 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4898 {
4899 /* All is fine, push the actual value. */
4900 pFpuCtx->FTW |= RT_BIT(iNewTop);
4901 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4902 }
4903 else if (pFpuCtx->FCW & X86_FCW_IM)
4904 {
4905 /* Masked stack overflow, push QNaN. */
4906 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4907 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4908 }
4909 else
4910 {
4911 /* Raise stack overflow, don't push anything. */
4912 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4913 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4914 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4915 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4916 return;
4917 }
4918
4919 fFsw &= ~X86_FSW_TOP_MASK;
4920 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4921 pFpuCtx->FSW = fFsw;
4922
4923 iemFpuRotateStackPush(pFpuCtx);
4924 RT_NOREF(pVCpu);
4925}
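
/* A push decrements TOP modulo 8; the (TOP + 7) & 7 form above does that
   without signed arithmetic, and the pop path later adds the equivalent of
   +1 the same way.  Two sample values (sketch only, not built; the helper
   name is made up): */
#if 0
static void iemExampleFpuTopDecrement(void)
{
    Assert(((0 + 7) & X86_FSW_TOP_SMASK) == 7); /* TOP 0 wraps to 7 on push */
    Assert(((3 + 7) & X86_FSW_TOP_SMASK) == 2); /* TOP 3 becomes 2 on push */
}
#endif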
4926
4927
4928/**
4929 * Stores a result in a FPU register and updates the FSW and FTW.
4930 *
4931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4932 * @param pFpuCtx The FPU context.
4933 * @param pResult The result to store.
4934 * @param iStReg Which FPU register to store it in.
4935 */
4936static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4937{
4938 Assert(iStReg < 8);
4939 uint16_t fNewFsw = pFpuCtx->FSW;
4940 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4941 fNewFsw &= ~X86_FSW_C_MASK;
4942 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4943 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4944 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4945 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4946 pFpuCtx->FSW = fNewFsw;
4947 pFpuCtx->FTW |= RT_BIT(iReg);
4948 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4949 RT_NOREF(pVCpu);
4950}
4951
4952
4953/**
4954 * Only updates the FPU status word (FSW) with the result of the current
4955 * instruction.
4956 *
4957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4958 * @param pFpuCtx The FPU context.
4959 * @param u16FSW The FSW output of the current instruction.
4960 */
4961static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4962{
4963 uint16_t fNewFsw = pFpuCtx->FSW;
4964 fNewFsw &= ~X86_FSW_C_MASK;
4965 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4966 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4967 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4968 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4969 pFpuCtx->FSW = fNewFsw;
4970 RT_NOREF(pVCpu);
4971}
4972
4973
4974/**
4975 * Pops one item off the FPU stack if no pending exception prevents it.
4976 *
4977 * @param pFpuCtx The FPU context.
4978 */
4979static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4980{
4981 /* Check pending exceptions. */
4982 uint16_t uFSW = pFpuCtx->FSW;
4983 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4984 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4985 return;
4986
4987 /* TOP++ (a pop increments TOP; adding 9 is +1 modulo 8 once masked). */
4988 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4989 uFSW &= ~X86_FSW_TOP_MASK;
4990 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4991 pFpuCtx->FSW = uFSW;
4992
4993 /* Mark the previous ST0 as empty. */
4994 iOldTop >>= X86_FSW_TOP_SHIFT;
4995 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4996
4997 /* Rotate the registers. */
4998 iemFpuRotateStackPop(pFpuCtx);
4999}
5000
5001
5002/**
5003 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5004 *
5005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5006 * @param pResult The FPU operation result to push.
5007 * @param uFpuOpcode The FPU opcode value.
5008 */
5009void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5010{
5011 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5012 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5013 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5014}
5015
5016
5017/**
5018 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5019 * and sets FPUDP and FPUDS.
5020 *
5021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5022 * @param pResult The FPU operation result to push.
5023 * @param iEffSeg The effective segment register.
5024 * @param GCPtrEff The effective address relative to @a iEffSeg.
5025 * @param uFpuOpcode The FPU opcode value.
5026 */
5027void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5028 uint16_t uFpuOpcode) RT_NOEXCEPT
5029{
5030 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5031 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5032 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5033 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5034}
5035
5036
5037/**
5038 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5039 * unless a pending exception prevents it.
5040 *
5041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5042 * @param pResult The FPU operation result to store and push.
5043 * @param uFpuOpcode The FPU opcode value.
5044 */
5045void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5046{
5047 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5048 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5049
5050 /* Update FSW and bail if there are pending exceptions afterwards. */
5051 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5052 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5053 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5054 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5055 {
5056 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5057 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5058 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5059 pFpuCtx->FSW = fFsw;
5060 return;
5061 }
5062
5063 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5064 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5065 {
5066 /* All is fine, push the actual value. */
5067 pFpuCtx->FTW |= RT_BIT(iNewTop);
5068 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5069 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5070 }
5071 else if (pFpuCtx->FCW & X86_FCW_IM)
5072 {
5073 /* Masked stack overflow, push QNaN. */
5074 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5075 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5076 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5077 }
5078 else
5079 {
5080 /* Raise stack overflow, don't push anything. */
5081 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5082 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5083 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5084 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5085 return;
5086 }
5087
5088 fFsw &= ~X86_FSW_TOP_MASK;
5089 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5090 pFpuCtx->FSW = fFsw;
5091
5092 iemFpuRotateStackPush(pFpuCtx);
5093}
5094
5095
5096/**
5097 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5098 * FOP.
5099 *
5100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5101 * @param pResult The result to store.
5102 * @param iStReg Which FPU register to store it in.
5103 * @param uFpuOpcode The FPU opcode value.
5104 */
5105void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5106{
5107 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5108 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5109 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5110}
5111
5112
5113/**
5114 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5115 * FOP, and then pops the stack.
5116 *
5117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5118 * @param pResult The result to store.
5119 * @param iStReg Which FPU register to store it in.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5127 iemFpuMaybePopOne(pFpuCtx);
5128}
5129
5130
5131/**
5132 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5133 * FPUDP, and FPUDS.
5134 *
5135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5136 * @param pResult The result to store.
5137 * @param iStReg Which FPU register to store it in.
5138 * @param iEffSeg The effective memory operand selector register.
5139 * @param GCPtrEff The effective memory operand offset.
5140 * @param uFpuOpcode The FPU opcode value.
5141 */
5142void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5143 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5144{
5145 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5146 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5147 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5148 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5149}
5150
5151
5152/**
5153 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5154 * FPUDP, and FPUDS, and then pops the stack.
5155 *
5156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5157 * @param pResult The result to store.
5158 * @param iStReg Which FPU register to store it in.
5159 * @param iEffSeg The effective memory operand selector register.
5160 * @param GCPtrEff The effective memory operand offset.
5161 * @param uFpuOpcode The FPU opcode value.
5162 */
5163void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5164 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5165{
5166 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5167 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5168 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5169 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5170 iemFpuMaybePopOne(pFpuCtx);
5171}
5172
5173
5174/**
5175 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5176 *
5177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5178 * @param uFpuOpcode The FPU opcode value.
5179 */
5180void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5181{
5182 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5183 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5184}
5185
5186
5187/**
5188 * Updates the FSW, FOP, FPUIP, and FPUCS.
5189 *
5190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5191 * @param u16FSW The FSW from the current instruction.
5192 * @param uFpuOpcode The FPU opcode value.
5193 */
5194void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5195{
5196 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5197 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5198 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5199}
5200
5201
5202/**
5203 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5204 *
5205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5206 * @param u16FSW The FSW from the current instruction.
5207 * @param uFpuOpcode The FPU opcode value.
5208 */
5209void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5210{
5211 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5212 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5213 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5214 iemFpuMaybePopOne(pFpuCtx);
5215}
5216
5217
5218/**
5219 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5220 *
5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5222 * @param u16FSW The FSW from the current instruction.
5223 * @param iEffSeg The effective memory operand selector register.
5224 * @param GCPtrEff The effective memory operand offset.
5225 * @param uFpuOpcode The FPU opcode value.
5226 */
5227void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5228{
5229 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5230 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5231 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5232 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5233}
5234
5235
5236/**
5237 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5238 *
5239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5240 * @param u16FSW The FSW from the current instruction.
5241 * @param uFpuOpcode The FPU opcode value.
5242 */
5243void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5244{
5245 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5246 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5247 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5248 iemFpuMaybePopOne(pFpuCtx);
5249 iemFpuMaybePopOne(pFpuCtx);
5250}
5251
5252
5253/**
5254 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5255 *
5256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5257 * @param u16FSW The FSW from the current instruction.
5258 * @param iEffSeg The effective memory operand selector register.
5259 * @param GCPtrEff The effective memory operand offset.
5260 * @param uFpuOpcode The FPU opcode value.
5261 */
5262void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5263 uint16_t uFpuOpcode) RT_NOEXCEPT
5264{
5265 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5266 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5267 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5268 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5269 iemFpuMaybePopOne(pFpuCtx);
5270}
5271
5272
5273/**
5274 * Worker routine for raising an FPU stack underflow exception.
5275 *
5276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5277 * @param pFpuCtx The FPU context.
5278 * @param iStReg The stack register being accessed.
5279 */
5280static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5281{
5282 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5283 if (pFpuCtx->FCW & X86_FCW_IM)
5284 {
5285 /* Masked underflow. */
5286 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5287 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5288 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5289 if (iStReg != UINT8_MAX)
5290 {
5291 pFpuCtx->FTW |= RT_BIT(iReg);
5292 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5293 }
5294 }
5295 else
5296 {
5297 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5298 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5299 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5300 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5301 }
5302 RT_NOREF(pVCpu);
5303}
5304
5305
5306/**
5307 * Raises a FPU stack underflow exception.
5308 *
5309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5310 * @param iStReg The destination register that should be loaded
5311 * with QNaN if \#IS is not masked. Specify
5312 * UINT8_MAX if none (like for fcom).
5313 * @param uFpuOpcode The FPU opcode value.
5314 */
5315void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5316{
5317 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5318 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5319 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5320}
5321
5322
5323void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5324{
5325 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5326 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5327 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5328 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5329}
5330
5331
5332void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5333{
5334 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5335 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5336 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5337 iemFpuMaybePopOne(pFpuCtx);
5338}
5339
5340
5341void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5342 uint16_t uFpuOpcode) RT_NOEXCEPT
5343{
5344 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5345 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5346 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5347 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5348 iemFpuMaybePopOne(pFpuCtx);
5349}
5350
5351
5352void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5353{
5354 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5355 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5356 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5357 iemFpuMaybePopOne(pFpuCtx);
5358 iemFpuMaybePopOne(pFpuCtx);
5359}
5360
5361
5362void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5363{
5364 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5365 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5366
5367 if (pFpuCtx->FCW & X86_FCW_IM)
5368 {
5369 /* Masked underflow - Push QNaN. */
5370 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5371 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5372 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5373 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5374 pFpuCtx->FTW |= RT_BIT(iNewTop);
5375 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5376 iemFpuRotateStackPush(pFpuCtx);
5377 }
5378 else
5379 {
5380 /* Exception pending - don't change TOP or the register stack. */
5381 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5382 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5383 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5385 }
5386}
5387
5388
5389void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5390{
5391 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5392 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5393
5394 if (pFpuCtx->FCW & X86_FCW_IM)
5395 {
5396 /* Masked underflow - Push QNaN. */
5397 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5398 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5399 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5400 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5401 pFpuCtx->FTW |= RT_BIT(iNewTop);
5402 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5403 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5404 iemFpuRotateStackPush(pFpuCtx);
5405 }
5406 else
5407 {
5408 /* Exception pending - don't change TOP or the register stack. */
5409 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5410 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5411 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5412 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5413 }
5414}
5415
5416
5417/**
5418 * Worker routine for raising an FPU stack overflow exception on a push.
5419 *
5420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5421 * @param pFpuCtx The FPU context.
5422 */
5423static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5424{
5425 if (pFpuCtx->FCW & X86_FCW_IM)
5426 {
5427 /* Masked overflow. */
5428 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5429 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5430 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5431 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5432 pFpuCtx->FTW |= RT_BIT(iNewTop);
5433 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5434 iemFpuRotateStackPush(pFpuCtx);
5435 }
5436 else
5437 {
5438 /* Exception pending - don't change TOP or the register stack. */
5439 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5440 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5441 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5442 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5443 }
5444 RT_NOREF(pVCpu);
5445}
5446
5447
5448/**
5449 * Raises a FPU stack overflow exception on a push.
5450 *
5451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5452 * @param uFpuOpcode The FPU opcode value.
5453 */
5454void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5455{
5456 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5457 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5458 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5459}
5460
5461
5462/**
5463 * Raises a FPU stack overflow exception on a push with a memory operand.
5464 *
5465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5466 * @param iEffSeg The effective memory operand selector register.
5467 * @param GCPtrEff The effective memory operand offset.
5468 * @param uFpuOpcode The FPU opcode value.
5469 */
5470void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5471{
5472 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5473 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5474 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5475 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5476}
5477
5478/** @} */
5479
5480
5481/** @name SSE+AVX SIMD access and helpers.
5482 *
5483 * @{
5484 */
5485/**
5486 * Stores a result in a SIMD XMM register, updates the MXCSR.
5487 *
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param pResult The result to store.
5490 * @param iXmmReg Which SIMD XMM register to store the result in.
5491 */
5492void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5493{
5494 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5495 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5496
5497 /* The result is only updated if there is no unmasked exception pending. */
5498 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5499 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5500 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5501}
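
/* MXCSR keeps the exception flags in bits 0..5 and the corresponding mask
   bits 7..12, so shifting the mask field down by X86_MXCSR_XCPT_MASK_SHIFT
   lines it up with the flags; any flag still set after removing the masked
   ones is an unmasked exception and blocks the register update above.
   Compressed sketch of that test (not built; the helper name is made up): */
#if 0
static bool iemExampleSseHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fFlags  = fMxcsr & X86_MXCSR_XCPT_FLAGS;
    uint32_t const fMasked = (fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT;
    return (fFlags & ~fMasked) != 0;
}
#endif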
5502
5503
5504/**
5505 * Updates the MXCSR.
5506 *
5507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5508 * @param fMxcsr The new MXCSR value.
5509 */
5510void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5511{
5512 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5513 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5514}
5515/** @} */
5516
5517
5518/** @name Memory access.
5519 *
5520 * @{
5521 */
5522
5523#undef LOG_GROUP
5524#define LOG_GROUP LOG_GROUP_IEM_MEM
5525
5526/**
5527 * Updates the IEMCPU::cbWritten counter if applicable.
5528 *
5529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5530 * @param fAccess The access being accounted for.
5531 * @param cbMem The access size.
5532 */
5533DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5534{
5535 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5536 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5537 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5538}
5539
5540
5541/**
5542 * Applies the segment limit, base and attributes.
5543 *
5544 * This may raise a \#GP or \#SS.
5545 *
5546 * @returns VBox strict status code.
5547 *
5548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5549 * @param fAccess The kind of access which is being performed.
5550 * @param iSegReg The index of the segment register to apply.
5551 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5552 * TSS, ++).
5553 * @param cbMem The access size.
5554 * @param pGCPtrMem Pointer to the guest memory address to apply
5555 * segmentation to. Input and output parameter.
5556 */
5557VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5558{
5559 if (iSegReg == UINT8_MAX)
5560 return VINF_SUCCESS;
5561
5562 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5563 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5564 switch (IEM_GET_CPU_MODE(pVCpu))
5565 {
5566 case IEMMODE_16BIT:
5567 case IEMMODE_32BIT:
5568 {
5569 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5570 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5571
5572 if ( pSel->Attr.n.u1Present
5573 && !pSel->Attr.n.u1Unusable)
5574 {
5575 Assert(pSel->Attr.n.u1DescType);
5576 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5577 {
5578 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5579 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5580 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5581
5582 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5583 {
5584 /** @todo CPL check. */
5585 }
5586
5587 /*
5588 * There are two kinds of data selectors, normal and expand down.
5589 */
5590 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5591 {
5592 if ( GCPtrFirst32 > pSel->u32Limit
5593 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5594 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5595 }
5596 else
5597 {
5598 /*
5599 * The upper boundary is defined by the B bit, not the G bit!
5600 */
5601 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5602 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5603 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5604 }
5605 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5606 }
5607 else
5608 {
5609 /*
5610 * Code selectors can usually be used to read through; writing is
5611 * only permitted in real and V8086 mode.
5612 */
5613 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5614 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5615 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5616 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5617 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5618
5619 if ( GCPtrFirst32 > pSel->u32Limit
5620 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5621 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5622
5623 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5624 {
5625 /** @todo CPL check. */
5626 }
5627
5628 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5629 }
5630 }
5631 else
5632 return iemRaiseGeneralProtectionFault0(pVCpu);
5633 return VINF_SUCCESS;
5634 }
5635
5636 case IEMMODE_64BIT:
5637 {
5638 RTGCPTR GCPtrMem = *pGCPtrMem;
5639 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5640 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5641
5642 Assert(cbMem >= 1);
5643 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5644 return VINF_SUCCESS;
5645 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5646 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5647 return iemRaiseGeneralProtectionFault0(pVCpu);
5648 }
5649
5650 default:
5651 AssertFailedReturn(VERR_IEM_IPE_7);
5652 }
5653}
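
/* For expand-down data segments the valid offset range is (limit, upper],
   where upper is 0xffffffff when the B bit is set and 0xffff otherwise; e.g.
   with limit 0x0fff and B clear only offsets 0x1000..0xffff are accessible.
   Minimal sketch of that check (not built; the helper name is made up): */
#if 0
static bool iemExampleExpandDownInBounds(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBig)
{
    uint32_t const uUpper = fBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper;
}
#endif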
5654
5655
5656/**
5657 * Translates a virtual address to a physical address and checks if we
5658 * can access the page as specified.
5659 *
5660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5661 * @param GCPtrMem The virtual address.
5662 * @param cbAccess The access size, for raising \#PF correctly for
5663 * FXSAVE and such.
5664 * @param fAccess The intended access.
5665 * @param pGCPhysMem Where to return the physical address.
5666 */
5667VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5668 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5669{
5670 /** @todo Need a different PGM interface here. We're currently using
5671 * generic / REM interfaces. this won't cut it for R0. */
5672 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5673 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5674 * here. */
5675 PGMPTWALK Walk;
5676 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5677 if (RT_FAILURE(rc))
5678 {
5679 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5680 /** @todo Check unassigned memory in unpaged mode. */
5681 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5682#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5683 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5684 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5685#endif
5686 *pGCPhysMem = NIL_RTGCPHYS;
5687 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5688 }
5689
5690 /* If the page is writable, user-accessible and does not have the no-exec
5691 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
5692 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5693 {
5694 /* Write to read only memory? */
5695 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5696 && !(Walk.fEffective & X86_PTE_RW)
5697 && ( ( IEM_GET_CPL(pVCpu) == 3
5698 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5699 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5700 {
5701 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5702 *pGCPhysMem = NIL_RTGCPHYS;
5703#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5704 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5705 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5706#endif
5707 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5708 }
5709
5710 /* Kernel memory accessed by userland? */
5711 if ( !(Walk.fEffective & X86_PTE_US)
5712 && IEM_GET_CPL(pVCpu) == 3
5713 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5714 {
5715 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5716 *pGCPhysMem = NIL_RTGCPHYS;
5717#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5718 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5719 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5720#endif
5721 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5722 }
5723
5724 /* Executing non-executable memory? */
5725 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5726 && (Walk.fEffective & X86_PTE_PAE_NX)
5727 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5728 {
5729 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5730 *pGCPhysMem = NIL_RTGCPHYS;
5731#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5732 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5733 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5734#endif
5735 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5736 VERR_ACCESS_DENIED);
5737 }
5738 }
5739
5740 /*
5741 * Set the dirty / access flags.
5742 * ASSUMES this is set when the address is translated rather than on commit...
5743 */
5744 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5745 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5746 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5747 {
5748 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5749 AssertRC(rc2);
5750 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5751 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5752 }
5753
5754 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5755 *pGCPhysMem = GCPhys;
5756 return VINF_SUCCESS;
5757}
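
/* Summarising the slow path above: with CR0.WP set even supervisor data
   writes honour the R/W bit, ring-3 (non-system) accesses additionally
   require U/S, and instruction fetches honour NX when EFER.NXE is enabled.
   A compressed sketch of that decision, with the relevant guest state passed
   in as plain booleans (not built; the helper name is made up): */
#if 0
static bool iemExamplePageAccessAllowed(uint64_t fPteEffective, bool fWrite, bool fExec,
                                        bool fUserAccess, bool fWp, bool fNxe)
{
    if (fWrite && !(fPteEffective & X86_PTE_RW) && (fUserAccess || fWp))
        return false; /* write to a read-only page */
    if (fUserAccess && !(fPteEffective & X86_PTE_US))
        return false; /* user-mode access to a supervisor page */
    if (fExec && fNxe && (fPteEffective & X86_PTE_PAE_NX))
        return false; /* instruction fetch from a no-execute page */
    return true;
}
#endif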
5758
5759#if 0 /*unused*/
5760/**
5761 * Looks up a memory mapping entry.
5762 *
5763 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5765 * @param pvMem The memory address.
5766 * @param fAccess The kind of access to match.
5767 */
5768DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5769{
5770 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5771 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5772 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5773 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5774 return 0;
5775 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5776 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5777 return 1;
5778 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5779 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5780 return 2;
5781 return VERR_NOT_FOUND;
5782}
5783#endif
5784
5785/**
5786 * Finds a free memmap entry when using iNextMapping doesn't work.
5787 *
5788 * @returns Memory mapping index, 1024 on failure.
5789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5790 */
5791static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5792{
5793 /*
5794 * The easy case.
5795 */
5796 if (pVCpu->iem.s.cActiveMappings == 0)
5797 {
5798 pVCpu->iem.s.iNextMapping = 1;
5799 return 0;
5800 }
5801
5802 /* There should be enough mappings for all instructions. */
5803 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5804
5805 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5806 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5807 return i;
5808
5809 AssertFailedReturn(1024);
5810}
5811
5812
5813/**
5814 * Commits a bounce buffer that needs writing back and unmaps it.
5815 *
5816 * @returns Strict VBox status code.
5817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5818 * @param iMemMap The index of the buffer to commit.
5819 * @param fPostponeFail Whether we can postpone write failures to ring-3.
5820 * Always false in ring-3, obviously.
5821 */
5822static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5823{
5824 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5825 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5826#ifdef IN_RING3
5827 Assert(!fPostponeFail);
5828 RT_NOREF_PV(fPostponeFail);
5829#endif
5830
5831 /*
5832 * Do the writing.
5833 */
5834 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5835 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5836 {
5837 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5838 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5839 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5840 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5841 {
5842 /*
5843 * Carefully and efficiently dealing with access handler return
5844 * codes make this a little bloated.
5845 */
5846 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5848 pbBuf,
5849 cbFirst,
5850 PGMACCESSORIGIN_IEM);
5851 if (rcStrict == VINF_SUCCESS)
5852 {
5853 if (cbSecond)
5854 {
5855 rcStrict = PGMPhysWrite(pVM,
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5857 pbBuf + cbFirst,
5858 cbSecond,
5859 PGMACCESSORIGIN_IEM);
5860 if (rcStrict == VINF_SUCCESS)
5861 { /* nothing */ }
5862 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5863 {
5864 LogEx(LOG_GROUP_IEM,
5865 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5868 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5869 }
5870#ifndef IN_RING3
5871 else if (fPostponeFail)
5872 {
5873 LogEx(LOG_GROUP_IEM,
5874 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5875 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5877 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5878 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5879 return iemSetPassUpStatus(pVCpu, rcStrict);
5880 }
5881#endif
5882 else
5883 {
5884 LogEx(LOG_GROUP_IEM,
5885 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5886 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5888 return rcStrict;
5889 }
5890 }
5891 }
5892 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5893 {
5894 if (!cbSecond)
5895 {
5896 LogEx(LOG_GROUP_IEM,
5897 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5898 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5899 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5900 }
5901 else
5902 {
5903 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5904 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5905 pbBuf + cbFirst,
5906 cbSecond,
5907 PGMACCESSORIGIN_IEM);
5908 if (rcStrict2 == VINF_SUCCESS)
5909 {
5910 LogEx(LOG_GROUP_IEM,
5911 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5913 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5914 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5915 }
5916 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5917 {
5918 LogEx(LOG_GROUP_IEM,
5919 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5922 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5923 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5924 }
5925#ifndef IN_RING3
5926 else if (fPostponeFail)
5927 {
5928 LogEx(LOG_GROUP_IEM,
5929 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5932 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5933 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5934 return iemSetPassUpStatus(pVCpu, rcStrict);
5935 }
5936#endif
5937 else
5938 {
5939 LogEx(LOG_GROUP_IEM,
5940 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5943 return rcStrict2;
5944 }
5945 }
5946 }
5947#ifndef IN_RING3
5948 else if (fPostponeFail)
5949 {
5950 LogEx(LOG_GROUP_IEM,
5951 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5952 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5954 if (!cbSecond)
5955 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5956 else
5957 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5958 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5959 return iemSetPassUpStatus(pVCpu, rcStrict);
5960 }
5961#endif
5962 else
5963 {
5964 LogEx(LOG_GROUP_IEM,
5965 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5966 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5967 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5968 return rcStrict;
5969 }
5970 }
5971 else
5972 {
5973 /*
5974 * No access handlers, much simpler.
5975 */
5976 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5977 if (RT_SUCCESS(rc))
5978 {
5979 if (cbSecond)
5980 {
5981 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5982 if (RT_SUCCESS(rc))
5983 { /* likely */ }
5984 else
5985 {
5986 LogEx(LOG_GROUP_IEM,
5987 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5988 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5989 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5990 return rc;
5991 }
5992 }
5993 }
5994 else
5995 {
5996 LogEx(LOG_GROUP_IEM,
5997 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5998 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5999 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6000 return rc;
6001 }
6002 }
6003 }
6004
6005#if defined(IEM_LOG_MEMORY_WRITES)
6006 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6007 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6008 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6009 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6010 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6011 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6012
6013 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6014 g_cbIemWrote = cbWrote;
6015 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6016#endif
6017
6018 /*
6019 * Free the mapping entry.
6020 */
6021 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6022 Assert(pVCpu->iem.s.cActiveMappings != 0);
6023 pVCpu->iem.s.cActiveMappings--;
6024 return VINF_SUCCESS;
6025}
6026
6027
6028/**
6029 * iemMemMap worker that deals with a request crossing pages.
6030 */
6031static VBOXSTRICTRC
6032iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6033 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6034{
6035 Assert(cbMem <= GUEST_PAGE_SIZE);
6036
6037 /*
6038 * Do the address translations.
6039 */
6040 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6041 RTGCPHYS GCPhysFirst;
6042 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6043 if (rcStrict != VINF_SUCCESS)
6044 return rcStrict;
6045 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6046
6047 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6048 RTGCPHYS GCPhysSecond;
6049 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6050 cbSecondPage, fAccess, &GCPhysSecond);
6051 if (rcStrict != VINF_SUCCESS)
6052 return rcStrict;
6053 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6054 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6055
6056 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6057
6058 /*
6059 * Read in the current memory content if it's a read, execute or partial
6060 * write access.
6061 */
6062 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6063
6064 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6065 {
6066 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6067 {
6068 /*
6069 * Must carefully deal with access handler status codes here,
6070 * makes the code a bit bloated.
6071 */
6072 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6073 if (rcStrict == VINF_SUCCESS)
6074 {
6075 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6076 if (rcStrict == VINF_SUCCESS)
6077 { /*likely */ }
6078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6080 else
6081 {
6082 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6083 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6084 return rcStrict;
6085 }
6086 }
6087 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6088 {
6089 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6090 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6091 {
6092 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6093 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6094 }
6095 else
6096 {
6097 LogEx(LOG_GROUP_IEM,
6098 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6099 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6100 return rcStrict2;
6101 }
6102 }
6103 else
6104 {
6105 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6106 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6107 return rcStrict;
6108 }
6109 }
6110 else
6111 {
6112 /*
6113 * No informational status codes here, much more straightforward.
6114 */
6115 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6116 if (RT_SUCCESS(rc))
6117 {
6118 Assert(rc == VINF_SUCCESS);
6119 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6120 if (RT_SUCCESS(rc))
6121 Assert(rc == VINF_SUCCESS);
6122 else
6123 {
6124 LogEx(LOG_GROUP_IEM,
6125 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6126 return rc;
6127 }
6128 }
6129 else
6130 {
6131 LogEx(LOG_GROUP_IEM,
6132 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6133 return rc;
6134 }
6135 }
6136 }
6137#ifdef VBOX_STRICT
6138 else
6139 memset(pbBuf, 0xcc, cbMem);
6140 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6141 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6142#endif
6143 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6144
6145 /*
6146 * Commit the bounce buffer entry.
6147 */
6148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6150 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6151 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6152 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6153 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6154 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6155 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6156 pVCpu->iem.s.cActiveMappings++;
6157
6158 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6159 *ppvMem = pbBuf;
6160 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6161 return VINF_SUCCESS;
6162}
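
/*
 * Worked example of the split arithmetic above (illustration only, assuming the usual
 * 4 KiB GUEST_PAGE_SIZE): a 4 byte access whose GCPtrFirst ends at page offset 0xffe gives
 *      cbFirstPage  = 0x1000 - 0xffe = 2   (bytes taken from the end of the first page)
 *      cbSecondPage = 4 - 2          = 2   (bytes taken from the start of the second page)
 * Both halves end up back to back in aBounceBuffers[iMemMap].ab[], so the caller gets one
 * contiguous buffer even though the guest access straddles two pages.
 */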
6163
6164
6165/**
6166 * iemMemMap worker that deals with iemMemPageMap failures.
6167 */
6168static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6169 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6170{
6171 /*
6172 * Filter out conditions we can handle and the ones which shouldn't happen.
6173 */
6174 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6175 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6176 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6177 {
6178 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6179 return rcMap;
6180 }
6181 pVCpu->iem.s.cPotentialExits++;
6182
6183 /*
6184 * Read in the current memory content if it's a read, execute or partial
6185 * write access.
6186 */
6187 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6188 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6189 {
6190 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6191 memset(pbBuf, 0xff, cbMem);
6192 else
6193 {
6194 int rc;
6195 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6196 {
6197 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6198 if (rcStrict == VINF_SUCCESS)
6199 { /* nothing */ }
6200 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6201 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6202 else
6203 {
6204 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6205 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6206 return rcStrict;
6207 }
6208 }
6209 else
6210 {
6211 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6212 if (RT_SUCCESS(rc))
6213 { /* likely */ }
6214 else
6215 {
6216 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6217 GCPhysFirst, rc));
6218 return rc;
6219 }
6220 }
6221 }
6222 }
6223#ifdef VBOX_STRICT
6224 else
6225 memset(pbBuf, 0xcc, cbMem);
6226#endif
6227#ifdef VBOX_STRICT
6228 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6229 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6230#endif
6231
6232 /*
6233 * Commit the bounce buffer entry.
6234 */
6235 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6236 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6237 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6238 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6239 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6240 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6241 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6242 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6243 pVCpu->iem.s.cActiveMappings++;
6244
6245 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6246 *ppvMem = pbBuf;
6247 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6248 return VINF_SUCCESS;
6249}
6250
6251
6252
6253/**
6254 * Maps the specified guest memory for the given kind of access.
6255 *
6256 * This may be using bounce buffering of the memory if it's crossing a page
6257 * boundary or if there is an access handler installed for any of it. Because
6258 * of lock prefix guarantees, we're in for some extra clutter when this
6259 * happens.
6260 *
6261 * This may raise a \#GP, \#SS, \#PF or \#AC.
6262 *
6263 * @returns VBox strict status code.
6264 *
6265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6266 * @param ppvMem Where to return the pointer to the mapped memory.
6267 * @param pbUnmapInfo Where to return unmap info to be passed to
6268 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6269 * done.
6270 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6271 * 8, 12, 16, 32 or 512. When used by string operations
6272 * it can be up to a page.
6273 * @param iSegReg The index of the segment register to use for this
6274 * access. The base and limits are checked. Use UINT8_MAX
6275 * to indicate that no segmentation is required (for IDT,
6276 * GDT and LDT accesses).
6277 * @param GCPtrMem The address of the guest memory.
6278 * @param fAccess How the memory is being accessed. The
6279 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6280 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6281 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6282 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6283 * set.
6284 * @param uAlignCtl Alignment control:
6285 * - Bits 15:0 is the alignment mask.
6286 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6287 * IEM_MEMMAP_F_ALIGN_SSE, and
6288 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6289 * Pass zero to skip alignment.
6290 */
6291VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6292 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6293{
6294 /*
6295 * Check the input and figure out which mapping entry to use.
6296 */
6297 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6298 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6299 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6300 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6301 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6302
6303 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6304 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6305 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6306 {
6307 iMemMap = iemMemMapFindFree(pVCpu);
6308 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6309 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6310 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6311 pVCpu->iem.s.aMemMappings[2].fAccess),
6312 VERR_IEM_IPE_9);
6313 }
6314
6315 /*
6316 * Map the memory, checking that we can actually access it. If something
6317 * slightly complicated happens, fall back on bounce buffering.
6318 */
6319 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6320 if (rcStrict == VINF_SUCCESS)
6321 { /* likely */ }
6322 else
6323 return rcStrict;
6324
6325 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6326 { /* likely */ }
6327 else
6328 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6329
6330 /*
6331 * Alignment check.
6332 */
6333 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6334 { /* likelyish */ }
6335 else
6336 {
6337 /* Misaligned access. */
6338 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6339 {
6340 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6341 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6342 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6343 {
6344 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6345
6346 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6347 return iemRaiseAlignmentCheckException(pVCpu);
6348 }
6349 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6350 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6351 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6352 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6353 * that's what FXSAVE does on a 10980xe. */
6354 && iemMemAreAlignmentChecksEnabled(pVCpu))
6355 return iemRaiseAlignmentCheckException(pVCpu);
6356 else
6357 return iemRaiseGeneralProtectionFault0(pVCpu);
6358 }
6359
6360#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6361 /* If the access is atomic there are host platform alignment restrictions
6362 we need to conform with. */
6363 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6364# if defined(RT_ARCH_AMD64)
6365 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6366# elif defined(RT_ARCH_ARM64)
6367 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6368# else
6369# error port me
6370# endif
6371 )
6372 { /* okay */ }
6373 else
6374 {
6375 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6376 pVCpu->iem.s.cMisalignedAtomics += 1;
6377 return VINF_EM_EMULATE_SPLIT_LOCK;
6378 }
6379#endif
6380 }
6381
6382#ifdef IEM_WITH_DATA_TLB
6383 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6384
6385 /*
6386 * Get the TLB entry for this page.
6387 */
6388 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6389 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6390 if (pTlbe->uTag == uTag)
6391 {
6392# ifdef VBOX_WITH_STATISTICS
6393 pVCpu->iem.s.DataTlb.cTlbHits++;
6394# endif
6395 }
6396 else
6397 {
6398 pVCpu->iem.s.DataTlb.cTlbMisses++;
6399 PGMPTWALK Walk;
6400 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6401 if (RT_FAILURE(rc))
6402 {
6403 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6404# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6405 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6406 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6407# endif
6408 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6409 }
6410
6411 Assert(Walk.fSucceeded);
6412 pTlbe->uTag = uTag;
6413 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6414 pTlbe->GCPhys = Walk.GCPhys;
6415 pTlbe->pbMappingR3 = NULL;
6416 }
6417
6418 /*
6419 * Check TLB page table level access flags.
6420 */
6421 /* If the page is either supervisor only or non-writable, we need to do
6422 more careful access checks. */
6423 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6424 {
6425 /* Write to read only memory? */
6426 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6427 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6428 && ( ( IEM_GET_CPL(pVCpu) == 3
6429 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6430 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6431 {
6432 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6433# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6434 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6435 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6436# endif
6437 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6438 }
6439
6440 /* Kernel memory accessed by userland? */
6441 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6442 && IEM_GET_CPL(pVCpu) == 3
6443 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6444 {
6445 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6446# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6447 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6448 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6449# endif
6450 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6451 }
6452 }
6453
6454 /*
6455 * Set the dirty / access flags.
6456 * ASSUMES this is set when the address is translated rather than on commit...
6457 */
6458 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6459 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6460 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6461 {
6462 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6463 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6464 AssertRC(rc2);
6465 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6466 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6467 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6468 }
6469
6470 /*
6471 * Look up the physical page info if necessary.
6472 */
6473 uint8_t *pbMem = NULL;
6474 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6475# ifdef IN_RING3
6476 pbMem = pTlbe->pbMappingR3;
6477# else
6478 pbMem = NULL;
6479# endif
6480 else
6481 {
6482 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6483 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6484 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6485 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6486 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6487 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6488 { /* likely */ }
6489 else
6490 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6491 pTlbe->pbMappingR3 = NULL;
6492 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6493 | IEMTLBE_F_NO_MAPPINGR3
6494 | IEMTLBE_F_PG_NO_READ
6495 | IEMTLBE_F_PG_NO_WRITE
6496 | IEMTLBE_F_PG_UNASSIGNED
6497 | IEMTLBE_F_PG_CODE_PAGE);
6498 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6499 &pbMem, &pTlbe->fFlagsAndPhysRev);
6500 AssertRCReturn(rc, rc);
6501# ifdef IN_RING3
6502 pTlbe->pbMappingR3 = pbMem;
6503# endif
6504 }
6505
6506 /*
6507 * Check the physical page level access and mapping.
6508 */
6509 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6510 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6511 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6512 { /* probably likely */ }
6513 else
6514 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6515 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6516 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6517 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6518 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6519 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6520
6521 if (pbMem)
6522 {
6523 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6524 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6525 fAccess |= IEM_ACCESS_NOT_LOCKED;
6526 }
6527 else
6528 {
6529 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6530 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6531 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6532 if (rcStrict != VINF_SUCCESS)
6533 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6534 }
6535
6536 void * const pvMem = pbMem;
6537
6538 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6539 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6540 if (fAccess & IEM_ACCESS_TYPE_READ)
6541 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6542
6543#else /* !IEM_WITH_DATA_TLB */
6544
6545 RTGCPHYS GCPhysFirst;
6546 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6547 if (rcStrict != VINF_SUCCESS)
6548 return rcStrict;
6549
6550 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6551 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6552 if (fAccess & IEM_ACCESS_TYPE_READ)
6553 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6554
6555 void *pvMem;
6556 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6557 if (rcStrict != VINF_SUCCESS)
6558 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6559
6560#endif /* !IEM_WITH_DATA_TLB */
6561
6562 /*
6563 * Fill in the mapping table entry.
6564 */
6565 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6566 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6567 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6568 pVCpu->iem.s.cActiveMappings += 1;
6569
6570 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6571 *ppvMem = pvMem;
6572 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6573 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6574 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6575
6576 return VINF_SUCCESS;
6577}
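
/*
 * Typical usage sketch for iemMemMap + iemMemCommitAndUnmap (illustration only, kept out
 * of the build; modelled on the fetch helpers further down, here as a 16-bit store - the
 * IEM_ACCESS_DATA_W constant and the uValue name are merely assumed for the example):
 */
#if 0
static VBOXSTRICTRC iemMemExampleStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t uValue)
{
    uint8_t      bUnmapInfo;
    uint16_t    *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, &bUnmapInfo, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1 /* alignment mask only, no flags */);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = uValue;                                  /* writes hit the mapping or the bounce buffer */
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* commits the bounce buffer when one was used */
    }
    return rcStrict;
}
#endif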
6578
6579
6580/**
6581 * Commits the guest memory if bounce buffered and unmaps it.
6582 *
6583 * @returns Strict VBox status code.
6584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6585 * @param bUnmapInfo Unmap info set by iemMemMap.
6586 */
6587VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6588{
6589 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6590 AssertMsgReturn( (bUnmapInfo & 0x08)
6591 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6592 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6593 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6594 VERR_NOT_FOUND);
6595
6596 /* If it's bounce buffered, we may need to write back the buffer. */
6597 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6598 {
6599 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6600 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6601 }
6602 /* Otherwise unlock it. */
6603 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6604 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6605
6606 /* Free the entry. */
6607 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6608 Assert(pVCpu->iem.s.cActiveMappings != 0);
6609 pVCpu->iem.s.cActiveMappings--;
6610 return VINF_SUCCESS;
6611}
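
/*
 * Worked example of the bUnmapInfo encoding checked above (illustration only; taken
 * straight from the encode expression in iemMemMap and the decode expressions here):
 *      encode:  bUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4)
 *      decode:  iMemMap    = bUnmapInfo & 0x7          (mapping slot, low 3 bits)
 *               valid bit  = bUnmapInfo & 0x08         (sanity marker for the asserts)
 *               type bits  = (unsigned)bUnmapInfo >> 4 (IEM_ACCESS_TYPE_XXX for cross checking)
 */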
6612
6613
6614/**
6615 * Rolls back the guest memory (conceptually only) and unmaps it.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param bUnmapInfo Unmap info set by iemMemMap.
6619 */
6620void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6621{
6622 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6623 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6624 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6625 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6626 == ((unsigned)bUnmapInfo >> 4),
6627 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6628
6629 /* Unlock it if necessary. */
6630 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6631 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6632
6633 /* Free the entry. */
6634 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6635 Assert(pVCpu->iem.s.cActiveMappings != 0);
6636 pVCpu->iem.s.cActiveMappings--;
6637}
6638
6639#ifdef IEM_WITH_SETJMP
6640
6641/**
6642 * Maps the specified guest memory for the given kind of access, longjmp on
6643 * error.
6644 *
6645 * This may be using bounce buffering of the memory if it's crossing a page
6646 * boundary or if there is an access handler installed for any of it. Because
6647 * of lock prefix guarantees, we're in for some extra clutter when this
6648 * happens.
6649 *
6650 * This may raise a \#GP, \#SS, \#PF or \#AC.
6651 *
6652 * @returns Pointer to the mapped memory.
6653 *
6654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6655 * @param pbUnmapInfo Where to return unmap info to be passed to
6656 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6657 * iemMemCommitAndUnmapWoSafeJmp,
6658 * iemMemCommitAndUnmapRoSafeJmp,
6659 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6660 * when done.
6661 * @param cbMem The number of bytes to map. This is usually 1,
6662 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6663 * string operations it can be up to a page.
6664 * @param iSegReg The index of the segment register to use for
6665 * this access. The base and limits are checked.
6666 * Use UINT8_MAX to indicate that no segmentation
6667 * is required (for IDT, GDT and LDT accesses).
6668 * @param GCPtrMem The address of the guest memory.
6669 * @param fAccess How the memory is being accessed. The
6670 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6671 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6672 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6673 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6674 * set.
6675 * @param uAlignCtl Alignment control:
6676 * - Bits 15:0 is the alignment mask.
6677 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6678 * IEM_MEMMAP_F_ALIGN_SSE, and
6679 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6680 * Pass zero to skip alignment.
6681 */
6682void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6683 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6684{
6685 /*
6686 * Check the input, check segment access and adjust address
6687 * with segment base.
6688 */
6689 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6690 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6691 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6692
6693 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6694 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6695 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6696
6697 /*
6698 * Alignment check.
6699 */
6700 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6701 { /* likelyish */ }
6702 else
6703 {
6704 /* Misaligned access. */
6705 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6706 {
6707 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6708 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6709 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6710 {
6711 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6712
6713 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6714 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6715 }
6716 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6717 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6718 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6719 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6720 * that's what FXSAVE does on a 10980xe. */
6721 && iemMemAreAlignmentChecksEnabled(pVCpu))
6722 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6723 else
6724 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6725 }
6726
6727#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6728 /* If the access is atomic there are host platform alignment restrictions
6729 we need to conform with. */
6730 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6731# if defined(RT_ARCH_AMD64)
6732 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6733# elif defined(RT_ARCH_ARM64)
6734 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6735# else
6736# error port me
6737# endif
6738 )
6739 { /* okay */ }
6740 else
6741 {
6742 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6743 pVCpu->iem.s.cMisalignedAtomics += 1;
6744 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6745 }
6746#endif
6747 }
6748
6749 /*
6750 * Figure out which mapping entry to use.
6751 */
6752 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6753 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6754 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6755 {
6756 iMemMap = iemMemMapFindFree(pVCpu);
6757 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6758 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6759 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6760 pVCpu->iem.s.aMemMappings[2].fAccess),
6761 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6762 }
6763
6764 /*
6765 * Crossing a page boundary?
6766 */
6767 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6768 { /* No (likely). */ }
6769 else
6770 {
6771 void *pvMem;
6772 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6773 if (rcStrict == VINF_SUCCESS)
6774 return pvMem;
6775 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6776 }
6777
6778#ifdef IEM_WITH_DATA_TLB
6779 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6780
6781 /*
6782 * Get the TLB entry for this page.
6783 */
6784 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6785 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6786 if (pTlbe->uTag == uTag)
6787 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6788 else
6789 {
6790 pVCpu->iem.s.DataTlb.cTlbMisses++;
6791 PGMPTWALK Walk;
6792 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6793 if (RT_FAILURE(rc))
6794 {
6795 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6796# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6797 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6798 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6799# endif
6800 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6801 }
6802
6803 Assert(Walk.fSucceeded);
6804 pTlbe->uTag = uTag;
6805 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6806 pTlbe->GCPhys = Walk.GCPhys;
6807 pTlbe->pbMappingR3 = NULL;
6808 }
6809
6810 /*
6811 * Check the flags and physical revision.
6812 */
6813 /** @todo make the caller pass these in with fAccess. */
6814 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6815 ? IEMTLBE_F_PT_NO_USER : 0;
6816 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6817 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6818 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6819 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6820 ? IEMTLBE_F_PT_NO_WRITE : 0)
6821 : 0;
6822 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6823 uint8_t *pbMem = NULL;
6824 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6825 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6826# ifdef IN_RING3
6827 pbMem = pTlbe->pbMappingR3;
6828# else
6829 pbMem = NULL;
6830# endif
6831 else
6832 {
6833 /*
6834 * Okay, something isn't quite right or needs refreshing.
6835 */
6836 /* Write to read only memory? */
6837 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6838 {
6839 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6840# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6841 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6842 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6843# endif
6844 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6845 }
6846
6847 /* Kernel memory accessed by userland? */
6848 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6849 {
6850 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6851# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6852 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6853 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6854# endif
6855 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6856 }
6857
6858 /* Set the dirty / access flags.
6859 ASSUMES this is set when the address is translated rather than on commit... */
6860 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6861 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6862 {
6863 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6864 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6865 AssertRC(rc2);
6866 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6867 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6868 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6869 }
6870
6871 /*
6872 * Check if the physical page info needs updating.
6873 */
6874 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6875# ifdef IN_RING3
6876 pbMem = pTlbe->pbMappingR3;
6877# else
6878 pbMem = NULL;
6879# endif
6880 else
6881 {
6882 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6883 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6884 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6885 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6886 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6887 pTlbe->pbMappingR3 = NULL;
6888 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6889 | IEMTLBE_F_NO_MAPPINGR3
6890 | IEMTLBE_F_PG_NO_READ
6891 | IEMTLBE_F_PG_NO_WRITE
6892 | IEMTLBE_F_PG_UNASSIGNED
6893 | IEMTLBE_F_PG_CODE_PAGE);
6894 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6895 &pbMem, &pTlbe->fFlagsAndPhysRev);
6896 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6897# ifdef IN_RING3
6898 pTlbe->pbMappingR3 = pbMem;
6899# endif
6900 }
6901
6902 /*
6903 * Check the physical page level access and mapping.
6904 */
6905 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6906 { /* probably likely */ }
6907 else
6908 {
6909 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6910 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6911 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6912 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6913 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6914 if (rcStrict == VINF_SUCCESS)
6915 return pbMem;
6916 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6917 }
6918 }
6919 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6920
6921 if (pbMem)
6922 {
6923 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6924 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6925 fAccess |= IEM_ACCESS_NOT_LOCKED;
6926 }
6927 else
6928 {
6929 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6930 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6931 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6932 if (rcStrict == VINF_SUCCESS)
6933 {
6934 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6935 return pbMem;
6936 }
6937 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6938 }
6939
6940 void * const pvMem = pbMem;
6941
6942 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6943 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6944 if (fAccess & IEM_ACCESS_TYPE_READ)
6945 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6946
6947#else /* !IEM_WITH_DATA_TLB */
6948
6949
6950 RTGCPHYS GCPhysFirst;
6951 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6952 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6953 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6954
6955 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6956 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6957 if (fAccess & IEM_ACCESS_TYPE_READ)
6958 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6959
6960 void *pvMem;
6961 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6962 if (rcStrict == VINF_SUCCESS)
6963 { /* likely */ }
6964 else
6965 {
6966 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6967 if (rcStrict == VINF_SUCCESS)
6968 return pvMem;
6969 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6970 }
6971
6972#endif /* !IEM_WITH_DATA_TLB */
6973
6974 /*
6975 * Fill in the mapping table entry.
6976 */
6977 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6978 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6979 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6980 pVCpu->iem.s.cActiveMappings++;
6981
6982 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6983
6984 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6985 return pvMem;
6986}
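
/*
 * Usage sketch for the longjmp flavour (illustration only, kept out of the build; matches
 * the *Jmp fetch helpers further down - any failure longjmps out of iemMemMapJmp, so no
 * status checking is needed at the call site):
 */
#if 0
static uint32_t iemMemExampleFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
                                                             IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
    uint32_t const  uValue  = *pu32Src;         /* any failure above has already longjmp'ed */
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    return uValue;
}
#endif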
6987
6988
6989/**
6990 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6991 *
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param bUnmapInfo Unmap info set by iemMemMap or
6994 * iemMemMapJmp.
6995 */
6996void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6997{
6998 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6999 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7000 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7001 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7002 == ((unsigned)bUnmapInfo >> 4),
7003 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7004
7005 /* If it's bounce buffered, we may need to write back the buffer. */
7006 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7007 {
7008 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7009 {
7010 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7011 if (rcStrict == VINF_SUCCESS)
7012 return;
7013 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7014 }
7015 }
7016 /* Otherwise unlock it. */
7017 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7018 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7019
7020 /* Free the entry. */
7021 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7022 Assert(pVCpu->iem.s.cActiveMappings != 0);
7023 pVCpu->iem.s.cActiveMappings--;
7024}
7025
7026
7027/** Fallback for iemMemCommitAndUnmapRwJmp. */
7028void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7029{
7030 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7031 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7032}
7033
7034
7035/** Fallback for iemMemCommitAndUnmapAtJmp. */
7036void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7037{
7038 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7039 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7040}
7041
7042
7043/** Fallback for iemMemCommitAndUnmapWoJmp. */
7044void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7045{
7046 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7047 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7048}
7049
7050
7051/** Fallback for iemMemCommitAndUnmapRoJmp. */
7052void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7053{
7054 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7055 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7056}
7057
7058
7059/** Fallback for iemMemRollbackAndUnmapWo. */
7060void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7061{
7062 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7063 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7064}
7065
7066#endif /* IEM_WITH_SETJMP */
7067
7068#ifndef IN_RING3
7069/**
7070 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
7071 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
7072 *
7073 * Allows the instruction to be completed and retired, while the IEM user will
7074 * return to ring-3 immediately afterwards and do the postponed writes there.
7075 *
7076 * @returns VBox status code (no strict statuses). Caller must check
7077 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7079 * @param bUnmapInfo Unmap info set by iemMemMap or
7080 * iemMemMapJmp.
7081 */
7082VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7083{
7084 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7085 AssertMsgReturn( (bUnmapInfo & 0x08)
7086 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7087 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7088 == ((unsigned)bUnmapInfo >> 4),
7089 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7090 VERR_NOT_FOUND);
7091
7092 /* If it's bounce buffered, we may need to write back the buffer. */
7093 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7094 {
7095 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7096 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7097 }
7098 /* Otherwise unlock it. */
7099 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7100 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7101
7102 /* Free the entry. */
7103 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7104 Assert(pVCpu->iem.s.cActiveMappings != 0);
7105 pVCpu->iem.s.cActiveMappings--;
7106 return VINF_SUCCESS;
7107}
7108#endif
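
/*
 * Caller-side sketch (illustration only): after the postponing unmap above, string
 * instructions and similar check the force flag before iterating, e.g.:
 *      if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      { ... safe to repeat the instruction in this context ... }
 *      else
 *      { ... postponed bounce buffer writes pending, return to ring-3 first ... }
 */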
7109
7110
7111/**
7112 * Rollbacks mappings, releasing page locks and such.
7113 *
7114 * The caller shall only call this after checking cActiveMappings.
7115 *
7116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7117 */
7118void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7119{
7120 Assert(pVCpu->iem.s.cActiveMappings > 0);
7121
7122 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7123 while (iMemMap-- > 0)
7124 {
7125 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7126 if (fAccess != IEM_ACCESS_INVALID)
7127 {
7128 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7130 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7131 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7132 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7133 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7134 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7136 pVCpu->iem.s.cActiveMappings--;
7137 }
7138 }
7139}
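
/*
 * Caller-side sketch (illustration only): per the note above, callers check
 * cActiveMappings before invoking the rollback, typically on instruction failure:
 *      if (pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */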
7140
7141
7142/*
7143 * Instantiate R/W templates.
7144 */
7145#define TMPL_MEM_WITH_STACK
7146
7147#define TMPL_MEM_TYPE uint8_t
7148#define TMPL_MEM_FN_SUFF U8
7149#define TMPL_MEM_FMT_TYPE "%#04x"
7150#define TMPL_MEM_FMT_DESC "byte"
7151#include "IEMAllMemRWTmpl.cpp.h"
7152
7153#define TMPL_MEM_TYPE uint16_t
7154#define TMPL_MEM_FN_SUFF U16
7155#define TMPL_MEM_FMT_TYPE "%#06x"
7156#define TMPL_MEM_FMT_DESC "word"
7157#include "IEMAllMemRWTmpl.cpp.h"
7158
7159#define TMPL_WITH_PUSH_SREG
7160#define TMPL_MEM_TYPE uint32_t
7161#define TMPL_MEM_FN_SUFF U32
7162#define TMPL_MEM_FMT_TYPE "%#010x"
7163#define TMPL_MEM_FMT_DESC "dword"
7164#include "IEMAllMemRWTmpl.cpp.h"
7165#undef TMPL_WITH_PUSH_SREG
7166
7167#define TMPL_MEM_TYPE uint64_t
7168#define TMPL_MEM_FN_SUFF U64
7169#define TMPL_MEM_FMT_TYPE "%#018RX64"
7170#define TMPL_MEM_FMT_DESC "qword"
7171#include "IEMAllMemRWTmpl.cpp.h"
7172
7173#undef TMPL_MEM_WITH_STACK
7174
7175#define TMPL_MEM_TYPE uint64_t
7176#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7177#define TMPL_MEM_FN_SUFF U64AlignedU128
7178#define TMPL_MEM_FMT_TYPE "%#018RX64"
7179#define TMPL_MEM_FMT_DESC "qword"
7180#include "IEMAllMemRWTmpl.cpp.h"
7181
7182/* See IEMAllMemRWTmplInline.cpp.h */
7183#define TMPL_MEM_BY_REF
7184
7185#define TMPL_MEM_TYPE RTFLOAT80U
7186#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7187#define TMPL_MEM_FN_SUFF R80
7188#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7189#define TMPL_MEM_FMT_DESC "tword"
7190#include "IEMAllMemRWTmpl.cpp.h"
7191
7192#define TMPL_MEM_TYPE RTPBCD80U
7193#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7194#define TMPL_MEM_FN_SUFF D80
7195#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7196#define TMPL_MEM_FMT_DESC "tword"
7197#include "IEMAllMemRWTmpl.cpp.h"
7198
7199#define TMPL_MEM_TYPE RTUINT128U
7200#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7201#define TMPL_MEM_FN_SUFF U128
7202#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7203#define TMPL_MEM_FMT_DESC "dqword"
7204#include "IEMAllMemRWTmpl.cpp.h"
7205
7206#define TMPL_MEM_TYPE RTUINT128U
7207#define TMPL_MEM_TYPE_ALIGN 0
7208#define TMPL_MEM_FN_SUFF U128NoAc
7209#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7210#define TMPL_MEM_FMT_DESC "dqword"
7211#include "IEMAllMemRWTmpl.cpp.h"
7212
7213/**
7214 * Fetches a data dword and zero extends it to a qword.
7215 *
7216 * @returns Strict VBox status code.
7217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7218 * @param pu64Dst Where to return the zero extended qword.
7219 * @param iSegReg The index of the segment register to use for
7220 * this access. The base and limits are checked.
7221 * @param GCPtrMem The address of the guest memory.
7222 */
7223VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7224{
7225 /* The lazy approach for now... */
7226 uint8_t bUnmapInfo;
7227 uint32_t const *pu32Src;
7228 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7229 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7230 if (rc == VINF_SUCCESS)
7231 {
7232 *pu64Dst = *pu32Src;
7233 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7234 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7235 }
7236 return rc;
7237}
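
/*
 * Usage sketch (illustration only; X86_SREG_DS and the GCPtrEffSrc name are merely
 * assumed for the example):
 *      uint64_t     uValue   = 0;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32_ZX_U64(pVCpu, &uValue, X86_SREG_DS, GCPtrEffSrc);
 * On VINF_SUCCESS, uValue holds the dword zero extended to 64 bits.
 */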
7238
7239
7240#ifdef SOME_UNUSED_FUNCTION
7241/**
7242 * Fetches a data dword and sign extends it to a qword.
7243 *
7244 * @returns Strict VBox status code.
7245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7246 * @param pu64Dst Where to return the sign extended value.
7247 * @param iSegReg The index of the segment register to use for
7248 * this access. The base and limits are checked.
7249 * @param GCPtrMem The address of the guest memory.
7250 */
7251VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7252{
7253 /* The lazy approach for now... */
7254 uint8_t bUnmapInfo;
7255 int32_t const *pi32Src;
7256 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7257 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7258 if (rc == VINF_SUCCESS)
7259 {
7260 *pu64Dst = *pi32Src;
7261 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7262 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7263 }
7264#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7265 else
7266 *pu64Dst = 0;
7267#endif
7268 return rc;
7269}
7270#endif
7271
7272
7273/**
7274 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7275 * related.
7276 *
7277 * Raises \#GP(0) if not aligned.
7278 *
7279 * @returns Strict VBox status code.
7280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7281 * @param pu128Dst Where to return the dqword.
7282 * @param iSegReg The index of the segment register to use for
7283 * this access. The base and limits are checked.
7284 * @param GCPtrMem The address of the guest memory.
7285 */
7286VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7287{
7288 /* The lazy approach for now... */
7289 uint8_t bUnmapInfo;
7290 PCRTUINT128U pu128Src;
7291 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7292 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7293 if (rc == VINF_SUCCESS)
7294 {
7295 pu128Dst->au64[0] = pu128Src->au64[0];
7296 pu128Dst->au64[1] = pu128Src->au64[1];
7297 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7298 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7299 }
7300 return rc;
7301}
7302
7303
7304#ifdef IEM_WITH_SETJMP
7305/**
7306 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7307 * related, longjmp on error.
7308 *
7309 * Raises \#GP(0) if not aligned.
7310 *
7311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7312 * @param pu128Dst Where to return the dqword.
7313 * @param iSegReg The index of the segment register to use for
7314 * this access. The base and limits are checked.
7315 * @param GCPtrMem The address of the guest memory.
7316 */
7317void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7318 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7319{
7320 /* The lazy approach for now... */
7321 uint8_t bUnmapInfo;
7322 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7323 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7324 pu128Dst->au64[0] = pu128Src->au64[0];
7325 pu128Dst->au64[1] = pu128Src->au64[1];
7326 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7327 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7328}
7329#endif
7330
7331
7332/**
7333 * Fetches a data oword (octo word), generally AVX related.
7334 *
7335 * @returns Strict VBox status code.
7336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7337 * @param pu256Dst Where to return the qqword.
7338 * @param iSegReg The index of the segment register to use for
7339 * this access. The base and limits are checked.
7340 * @param GCPtrMem The address of the guest memory.
7341 */
7342VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7343{
7344 /* The lazy approach for now... */
7345 uint8_t bUnmapInfo;
7346 PCRTUINT256U pu256Src;
7347 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7348 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7349 if (rc == VINF_SUCCESS)
7350 {
7351 pu256Dst->au64[0] = pu256Src->au64[0];
7352 pu256Dst->au64[1] = pu256Src->au64[1];
7353 pu256Dst->au64[2] = pu256Src->au64[2];
7354 pu256Dst->au64[3] = pu256Src->au64[3];
7355 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7356 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7357 }
7358 return rc;
7359}
7360
7361
7362#ifdef IEM_WITH_SETJMP
7363/**
7364 * Fetches a data oword (octo word), generally AVX related.
7365 *
7366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7367 * @param   pu256Dst            Where to return the qqword.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 */
7372void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7373{
7374 /* The lazy approach for now... */
7375 uint8_t bUnmapInfo;
7376 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7377 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7378 pu256Dst->au64[0] = pu256Src->au64[0];
7379 pu256Dst->au64[1] = pu256Src->au64[1];
7380 pu256Dst->au64[2] = pu256Src->au64[2];
7381 pu256Dst->au64[3] = pu256Src->au64[3];
7382 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7383 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7384}
7385#endif
7386
7387
7388/**
7389 * Fetches a data oword (octo word) at an aligned address, generally AVX
7390 * related.
7391 *
7392 * Raises \#GP(0) if not aligned.
7393 *
7394 * @returns Strict VBox status code.
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7396 * @param   pu256Dst            Where to return the qqword.
7397 * @param iSegReg The index of the segment register to use for
7398 * this access. The base and limits are checked.
7399 * @param GCPtrMem The address of the guest memory.
7400 */
7401VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7402{
7403 /* The lazy approach for now... */
7404 uint8_t bUnmapInfo;
7405 PCRTUINT256U pu256Src;
7406 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7407 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7408 if (rc == VINF_SUCCESS)
7409 {
7410 pu256Dst->au64[0] = pu256Src->au64[0];
7411 pu256Dst->au64[1] = pu256Src->au64[1];
7412 pu256Dst->au64[2] = pu256Src->au64[2];
7413 pu256Dst->au64[3] = pu256Src->au64[3];
7414 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7415 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7416 }
7417 return rc;
7418}
7419
7420
7421#ifdef IEM_WITH_SETJMP
7422/**
7423 * Fetches a data oword (octo word) at an aligned address, generally AVX
7424 * related, longjmp on error.
7425 *
7426 * Raises \#GP(0) if not aligned.
7427 *
7428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7429 * @param   pu256Dst            Where to return the qqword.
7430 * @param iSegReg The index of the segment register to use for
7431 * this access. The base and limits are checked.
7432 * @param GCPtrMem The address of the guest memory.
7433 */
7434void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7435 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7436{
7437 /* The lazy approach for now... */
7438 uint8_t bUnmapInfo;
7439 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7440 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7441 pu256Dst->au64[0] = pu256Src->au64[0];
7442 pu256Dst->au64[1] = pu256Src->au64[1];
7443 pu256Dst->au64[2] = pu256Src->au64[2];
7444 pu256Dst->au64[3] = pu256Src->au64[3];
7445 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7446 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7447}
7448#endif
7449
7450
7451
7452/**
7453 * Fetches a descriptor register (lgdt, lidt).
7454 *
7455 * @returns Strict VBox status code.
7456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7457 * @param pcbLimit Where to return the limit.
7458 * @param pGCPtrBase Where to return the base.
7459 * @param iSegReg The index of the segment register to use for
7460 * this access. The base and limits are checked.
7461 * @param GCPtrMem The address of the guest memory.
7462 * @param enmOpSize The effective operand size.
7463 */
7464VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7465 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7466{
7467 /*
7468 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7469 * little special:
7470 * - The two reads are done separately.
7471 *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7472 * - We suspect the 386 to actually commit the limit before the base in
7473 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7474 *        don't try to emulate this eccentric behavior, because it's not well
7475 * enough understood and rather hard to trigger.
7476 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7477 */
7478 VBOXSTRICTRC rcStrict;
7479 if (IEM_IS_64BIT_CODE(pVCpu))
7480 {
7481 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7482 if (rcStrict == VINF_SUCCESS)
7483 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7484 }
7485 else
7486 {
7487        uint32_t uTmp = 0; /* (silence a Visual C++ 'maybe used uninitialized' warning) */
7488 if (enmOpSize == IEMMODE_32BIT)
7489 {
7490 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7491 {
7492 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7493 if (rcStrict == VINF_SUCCESS)
7494 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7495 }
7496 else
7497 {
7498 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7499 if (rcStrict == VINF_SUCCESS)
7500 {
7501 *pcbLimit = (uint16_t)uTmp;
7502 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7503 }
7504 }
7505 if (rcStrict == VINF_SUCCESS)
7506 *pGCPtrBase = uTmp;
7507 }
7508 else
7509 {
7510 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7511 if (rcStrict == VINF_SUCCESS)
7512 {
7513 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7514 if (rcStrict == VINF_SUCCESS)
7515 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7516 }
7517 }
7518 }
7519 return rcStrict;
7520}
7521
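/*
 * Illustrative layout of the memory operand read above (informational comment
 * only, not an official structure of this file):
 *      offset 0: 16-bit limit (the 486 does a dword read here with a 32-bit
 *                operand size and keeps only the low word).
 *      offset 2: base - 64-bit in long mode, otherwise a 32-bit read of which
 *                only bits 0..23 are kept when the operand size is 16-bit.
 */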
7522
7523/**
7524 * Stores a data dqword, SSE aligned.
7525 *
7526 * @returns Strict VBox status code.
7527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7528 * @param iSegReg The index of the segment register to use for
7529 * this access. The base and limits are checked.
7530 * @param GCPtrMem The address of the guest memory.
7531 * @param u128Value The value to store.
7532 */
7533VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7534{
7535 /* The lazy approach for now... */
7536 uint8_t bUnmapInfo;
7537 PRTUINT128U pu128Dst;
7538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7539 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7540 if (rc == VINF_SUCCESS)
7541 {
7542 pu128Dst->au64[0] = u128Value.au64[0];
7543 pu128Dst->au64[1] = u128Value.au64[1];
7544 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7545 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7546 }
7547 return rc;
7548}
7549
7550
7551#ifdef IEM_WITH_SETJMP
7552/**
7553 * Stores a data dqword, SSE aligned, longjmp on error.
7554 *
7556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7557 * @param iSegReg The index of the segment register to use for
7558 * this access. The base and limits are checked.
7559 * @param GCPtrMem The address of the guest memory.
7560 * @param u128Value The value to store.
7561 */
7562void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7563 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7564{
7565 /* The lazy approach for now... */
7566 uint8_t bUnmapInfo;
7567 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7568 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7569 pu128Dst->au64[0] = u128Value.au64[0];
7570 pu128Dst->au64[1] = u128Value.au64[1];
7571 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7572 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7573}
7574#endif
7575
7576
7577/**
7578 * Stores a data qqword.
7579 *
7580 * @returns Strict VBox status code.
7581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7582 * @param iSegReg The index of the segment register to use for
7583 * this access. The base and limits are checked.
7584 * @param GCPtrMem The address of the guest memory.
7585 * @param pu256Value Pointer to the value to store.
7586 */
7587VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7588{
7589 /* The lazy approach for now... */
7590 uint8_t bUnmapInfo;
7591 PRTUINT256U pu256Dst;
7592 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7593 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7594 if (rc == VINF_SUCCESS)
7595 {
7596 pu256Dst->au64[0] = pu256Value->au64[0];
7597 pu256Dst->au64[1] = pu256Value->au64[1];
7598 pu256Dst->au64[2] = pu256Value->au64[2];
7599 pu256Dst->au64[3] = pu256Value->au64[3];
7600 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7601 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7602 }
7603 return rc;
7604}
7605
7606
7607#ifdef IEM_WITH_SETJMP
7608/**
7609 * Stores a data qqword, longjmp on error.
7610 *
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param iSegReg The index of the segment register to use for
7613 * this access. The base and limits are checked.
7614 * @param GCPtrMem The address of the guest memory.
7615 * @param pu256Value Pointer to the value to store.
7616 */
7617void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7618{
7619 /* The lazy approach for now... */
7620 uint8_t bUnmapInfo;
7621 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7622 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7623 pu256Dst->au64[0] = pu256Value->au64[0];
7624 pu256Dst->au64[1] = pu256Value->au64[1];
7625 pu256Dst->au64[2] = pu256Value->au64[2];
7626 pu256Dst->au64[3] = pu256Value->au64[3];
7627 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7628 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7629}
7630#endif
7631
7632
7633/**
7634 * Stores a data qqword.
7635 *
7636 * @returns Strict VBox status code.
7637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7638 * @param iSegReg The index of the segment register to use for
7639 * this access. The base and limits are checked.
7640 * @param GCPtrMem The address of the guest memory.
7641 * @param pu256Value Pointer to the value to store.
7642 */
7643VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7644{
7645 /* The lazy approach for now... */
7646 uint8_t bUnmapInfo;
7647 PRTUINT256U pu256Dst;
7648 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7649 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7650 if (rc == VINF_SUCCESS)
7651 {
7652 pu256Dst->au64[0] = pu256Value->au64[0];
7653 pu256Dst->au64[1] = pu256Value->au64[1];
7654 pu256Dst->au64[2] = pu256Value->au64[2];
7655 pu256Dst->au64[3] = pu256Value->au64[3];
7656 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7657 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7658 }
7659 return rc;
7660}
7661
7662
7663#ifdef IEM_WITH_SETJMP
7664/**
7665 * Stores a data qqword, longjmp on error.
7666 *
7667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7668 * @param iSegReg The index of the segment register to use for
7669 * this access. The base and limits are checked.
7670 * @param GCPtrMem The address of the guest memory.
7671 * @param pu256Value Pointer to the value to store.
7672 */
7673void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7674{
7675 /* The lazy approach for now... */
7676 uint8_t bUnmapInfo;
7677 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7678 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7679 pu256Dst->au64[0] = pu256Value->au64[0];
7680 pu256Dst->au64[1] = pu256Value->au64[1];
7681 pu256Dst->au64[2] = pu256Value->au64[2];
7682 pu256Dst->au64[3] = pu256Value->au64[3];
7683 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7684 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7685}
7686#endif
7687
7688
7689/**
7690 * Stores a data qqword, AVX \#GP(0) aligned.
7691 *
7692 * @returns Strict VBox status code.
7693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7694 * @param iSegReg The index of the segment register to use for
7695 * this access. The base and limits are checked.
7696 * @param GCPtrMem The address of the guest memory.
7697 * @param pu256Value Pointer to the value to store.
7698 */
7699VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7700{
7701 /* The lazy approach for now... */
7702 uint8_t bUnmapInfo;
7703 PRTUINT256U pu256Dst;
7704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7705 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7706 if (rc == VINF_SUCCESS)
7707 {
7708 pu256Dst->au64[0] = pu256Value->au64[0];
7709 pu256Dst->au64[1] = pu256Value->au64[1];
7710 pu256Dst->au64[2] = pu256Value->au64[2];
7711 pu256Dst->au64[3] = pu256Value->au64[3];
7712 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7713 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7714 }
7715 return rc;
7716}
7717
7718
7719#ifdef IEM_WITH_SETJMP
7720/**
7721 * Stores a data qqword, AVX aligned, longjmp on error.
7722 *
7724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7725 * @param iSegReg The index of the segment register to use for
7726 * this access. The base and limits are checked.
7727 * @param GCPtrMem The address of the guest memory.
7728 * @param pu256Value Pointer to the value to store.
7729 */
7730void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7731 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7732{
7733 /* The lazy approach for now... */
7734 uint8_t bUnmapInfo;
7735 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7736 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7737 pu256Dst->au64[0] = pu256Value->au64[0];
7738 pu256Dst->au64[1] = pu256Value->au64[1];
7739 pu256Dst->au64[2] = pu256Value->au64[2];
7740 pu256Dst->au64[3] = pu256Value->au64[3];
7741 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7742 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7743}
7744#endif
7745
7746
7747/**
7748 * Stores a descriptor register (sgdt, sidt).
7749 *
7750 * @returns Strict VBox status code.
7751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7752 * @param cbLimit The limit.
7753 * @param GCPtrBase The base address.
7754 * @param iSegReg The index of the segment register to use for
7755 * this access. The base and limits are checked.
7756 * @param GCPtrMem The address of the guest memory.
7757 */
7758VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7759{
7760 /*
7761     * The SIDT and SGDT instructions actually store the data using two
7762     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
7763     * do not respond to operand-size prefixes.
7764 */
7765 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7766 if (rcStrict == VINF_SUCCESS)
7767 {
7768 if (IEM_IS_16BIT_CODE(pVCpu))
7769 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7770 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7771 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7772 else if (IEM_IS_32BIT_CODE(pVCpu))
7773 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7774 else
7775 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7776 }
7777 return rcStrict;
7778}
7779
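/*
 * Illustrative summary of the stores above (informational comment only):
 *      16-bit code: word limit at +0, dword base at +2 with the top byte
 *                   forced to 0xff on 286-class targets.
 *      32-bit code: word limit at +0, dword base at +2.
 *      64-bit code: word limit at +0, qword base at +2.
 */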
7780
7781/**
7782 * Begin a special stack push (used by interrupt, exceptions and such).
7783 *
7784 * This will raise \#SS or \#PF if appropriate.
7785 *
7786 * @returns Strict VBox status code.
7787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7788 * @param cbMem The number of bytes to push onto the stack.
7789 * @param cbAlign The alignment mask (7, 3, 1).
7790 * @param ppvMem Where to return the pointer to the stack memory.
7791 * As with the other memory functions this could be
7792 * direct access or bounce buffered access, so
7793 *                              don't commit register changes until the commit call
7794 * succeeds.
7795 * @param pbUnmapInfo Where to store unmap info for
7796 * iemMemStackPushCommitSpecial.
7797 * @param puNewRsp Where to return the new RSP value. This must be
7798 * passed unchanged to
7799 * iemMemStackPushCommitSpecial().
7800 */
7801VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7802 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7803{
7804 Assert(cbMem < UINT8_MAX);
7805 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7806 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7807}
7808
7809
7810/**
7811 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7812 *
7813 * This will update the rSP.
7814 *
7815 * @returns Strict VBox status code.
7816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7817 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7818 * @param uNewRsp The new RSP value returned by
7819 * iemMemStackPushBeginSpecial().
7820 */
7821VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7822{
7823 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7824 if (rcStrict == VINF_SUCCESS)
7825 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7826 return rcStrict;
7827}
7828
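/*
 * Illustrative usage sketch of the special push protocol above (hypothetical
 * caller, not part of the original file): map, fill, then commit, passing the
 * RSP value back unchanged so it only becomes architecturally visible on a
 * successful commit.
 */
#if 0 /* sketch only, not built */
static VBOXSTRICTRC iemExamplePushQwordSpecial(PVMCPUCC pVCpu, uint64_t uValue)
{
    void        *pvMem      = NULL;
    uint8_t      bUnmapInfo = 0;
    uint64_t     uNewRsp    = 0;
    VBOXSTRICTRC rcStrict   = iemMemStackPushBeginSpecial(pVCpu, sizeof(uValue), 7 /*cbAlign*/,
                                                          &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint64_t *)pvMem = uValue;            /* write into the (possibly bounce buffered) mapping */
    return iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* updates RSP on success */
}
#endif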
7829
7830/**
7831 * Begin a special stack pop (used by iret, retf and such).
7832 *
7833 * This will raise \#SS or \#PF if appropriate.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param cbMem The number of bytes to pop from the stack.
7838 * @param cbAlign The alignment mask (7, 3, 1).
7839 * @param ppvMem Where to return the pointer to the stack memory.
7840 * @param pbUnmapInfo Where to store unmap info for
7841 * iemMemStackPopDoneSpecial.
7842 * @param puNewRsp Where to return the new RSP value. This must be
7843 * assigned to CPUMCTX::rsp manually some time
7844 * after iemMemStackPopDoneSpecial() has been
7845 * called.
7846 */
7847VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7848 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7849{
7850 Assert(cbMem < UINT8_MAX);
7851 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7852 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7853}
7854
7855
7856/**
7857 * Continue a special stack pop (used by iret and retf), for the purpose of
7858 * retrieving a new stack pointer.
7859 *
7860 * This will raise \#SS or \#PF if appropriate.
7861 *
7862 * @returns Strict VBox status code.
7863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7864 * @param off Offset from the top of the stack. This is zero
7865 * except in the retf case.
7866 * @param cbMem The number of bytes to pop from the stack.
7867 * @param ppvMem Where to return the pointer to the stack memory.
7868 * @param pbUnmapInfo Where to store unmap info for
7869 * iemMemStackPopDoneSpecial.
7870 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7871 * return this because all use of this function is
7872 * to retrieve a new value and anything we return
7873 * here would be discarded.)
7874 */
7875VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7876 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7877{
7878 Assert(cbMem < UINT8_MAX);
7879
7880    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7881 RTGCPTR GCPtrTop;
7882 if (IEM_IS_64BIT_CODE(pVCpu))
7883 GCPtrTop = uCurNewRsp;
7884 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7885 GCPtrTop = (uint32_t)uCurNewRsp;
7886 else
7887 GCPtrTop = (uint16_t)uCurNewRsp;
7888
7889 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7890 0 /* checked in iemMemStackPopBeginSpecial */);
7891}
7892
7893
7894/**
7895 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7896 * iemMemStackPopContinueSpecial).
7897 *
7898 * The caller will manually commit the rSP.
7899 *
7900 * @returns Strict VBox status code.
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param bUnmapInfo Unmap information returned by
7903 * iemMemStackPopBeginSpecial() or
7904 * iemMemStackPopContinueSpecial().
7905 */
7906VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7907{
7908 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7909}
7910
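/*
 * Illustrative usage sketch of the special pop protocol above (hypothetical
 * caller, not part of the original file): begin, read the frame, done, and
 * only then assign the returned RSP value to CPUMCTX::rsp manually.
 * iemMemStackPopContinueSpecial() would be used with a non-zero offset to
 * fetch an additional SS:RSP pair further up the frame (the retf/iret case).
 */
#if 0 /* sketch only, not built */
static VBOXSTRICTRC iemExamplePopTwoQwordsSpecial(PVMCPUCC pVCpu, uint64_t *puValue0, uint64_t *puValue1)
{
    void const  *pvFrame    = NULL;
    uint8_t      bUnmapInfo = 0;
    uint64_t     uNewRsp    = 0;
    VBOXSTRICTRC rcStrict   = iemMemStackPopBeginSpecial(pVCpu, 2 * sizeof(uint64_t), 7 /*cbAlign*/,
                                                         &pvFrame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puValue0 = ((uint64_t const *)pvFrame)[0];
    *puValue1 = ((uint64_t const *)pvFrame)[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the caller commits RSP manually */
    return rcStrict;
}
#endif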
7911
7912/**
7913 * Fetches a system table byte.
7914 *
7915 * @returns Strict VBox status code.
7916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7917 * @param pbDst Where to return the byte.
7918 * @param iSegReg The index of the segment register to use for
7919 * this access. The base and limits are checked.
7920 * @param GCPtrMem The address of the guest memory.
7921 */
7922VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7923{
7924 /* The lazy approach for now... */
7925 uint8_t bUnmapInfo;
7926 uint8_t const *pbSrc;
7927 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7928 if (rc == VINF_SUCCESS)
7929 {
7930 *pbDst = *pbSrc;
7931 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7932 }
7933 return rc;
7934}
7935
7936
7937/**
7938 * Fetches a system table word.
7939 *
7940 * @returns Strict VBox status code.
7941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7942 * @param pu16Dst Where to return the word.
7943 * @param iSegReg The index of the segment register to use for
7944 * this access. The base and limits are checked.
7945 * @param GCPtrMem The address of the guest memory.
7946 */
7947VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7948{
7949 /* The lazy approach for now... */
7950 uint8_t bUnmapInfo;
7951 uint16_t const *pu16Src;
7952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7953 if (rc == VINF_SUCCESS)
7954 {
7955 *pu16Dst = *pu16Src;
7956 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7957 }
7958 return rc;
7959}
7960
7961
7962/**
7963 * Fetches a system table dword.
7964 *
7965 * @returns Strict VBox status code.
7966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7967 * @param pu32Dst Where to return the dword.
7968 * @param iSegReg The index of the segment register to use for
7969 * this access. The base and limits are checked.
7970 * @param GCPtrMem The address of the guest memory.
7971 */
7972VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7973{
7974 /* The lazy approach for now... */
7975 uint8_t bUnmapInfo;
7976 uint32_t const *pu32Src;
7977 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7978 if (rc == VINF_SUCCESS)
7979 {
7980 *pu32Dst = *pu32Src;
7981 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7982 }
7983 return rc;
7984}
7985
7986
7987/**
7988 * Fetches a system table qword.
7989 *
7990 * @returns Strict VBox status code.
7991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7992 * @param pu64Dst Where to return the qword.
7993 * @param iSegReg The index of the segment register to use for
7994 * this access. The base and limits are checked.
7995 * @param GCPtrMem The address of the guest memory.
7996 */
7997VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7998{
7999 /* The lazy approach for now... */
8000 uint8_t bUnmapInfo;
8001 uint64_t const *pu64Src;
8002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8003 if (rc == VINF_SUCCESS)
8004 {
8005 *pu64Dst = *pu64Src;
8006 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8007 }
8008 return rc;
8009}
8010
8011
8012/**
8013 * Fetches a descriptor table entry with caller specified error code.
8014 *
8015 * @returns Strict VBox status code.
8016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8017 * @param pDesc Where to return the descriptor table entry.
8018 * @param uSel The selector which table entry to fetch.
8019 * @param uXcpt The exception to raise on table lookup error.
8020 * @param uErrorCode The error code associated with the exception.
8021 */
8022static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8023 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8024{
8025 AssertPtr(pDesc);
8026 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8027
8028 /** @todo did the 286 require all 8 bytes to be accessible? */
8029 /*
8030 * Get the selector table base and check bounds.
8031 */
8032 RTGCPTR GCPtrBase;
8033 if (uSel & X86_SEL_LDT)
8034 {
8035 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8036 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8037 {
8038 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8039 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8040 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8041 uErrorCode, 0);
8042 }
8043
8044 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8045 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8046 }
8047 else
8048 {
8049 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8050 {
8051 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8052 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8053 uErrorCode, 0);
8054 }
8055 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8056 }
8057
8058 /*
8059 * Read the legacy descriptor and maybe the long mode extensions if
8060 * required.
8061 */
8062 VBOXSTRICTRC rcStrict;
8063 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8064 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8065 else
8066 {
8067 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8068 if (rcStrict == VINF_SUCCESS)
8069 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8070 if (rcStrict == VINF_SUCCESS)
8071 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8072 if (rcStrict == VINF_SUCCESS)
8073 pDesc->Legacy.au16[3] = 0;
8074 else
8075 return rcStrict;
8076 }
8077
8078 if (rcStrict == VINF_SUCCESS)
8079 {
8080 if ( !IEM_IS_LONG_MODE(pVCpu)
8081 || pDesc->Legacy.Gen.u1DescType)
8082 pDesc->Long.au64[1] = 0;
8083 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8084 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8085 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8086 else
8087 {
8088 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8089 /** @todo is this the right exception? */
8090 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8091 }
8092 }
8093 return rcStrict;
8094}
8095
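/*
 * Worked example for the bounds checks above (informational comment only):
 * uSel = 0x002b decomposes into index 5, TI = 1 (LDT) and RPL = 3.  The check
 * ORs in X86_SEL_RPL_LDT so that 0x28 + 7, the last byte of the 8-byte entry,
 * must lie within the table limit, while GCPtrBase + (uSel & X86_SEL_MASK)
 * = GCPtrBase + 0x28 addresses the entry itself.
 */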
8096
8097/**
8098 * Fetches a descriptor table entry.
8099 *
8100 * @returns Strict VBox status code.
8101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8102 * @param pDesc Where to return the descriptor table entry.
8103 * @param uSel The selector which table entry to fetch.
8104 * @param uXcpt The exception to raise on table lookup error.
8105 */
8106VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8107{
8108 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8109}
8110
8111
8112/**
8113 * Marks the selector descriptor as accessed (only non-system descriptors).
8114 *
8115 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8116 * will therefore skip the limit checks.
8117 *
8118 * @returns Strict VBox status code.
8119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8120 * @param uSel The selector.
8121 */
8122VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8123{
8124 /*
8125 * Get the selector table base and calculate the entry address.
8126 */
8127 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8128 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8129 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8130 GCPtr += uSel & X86_SEL_MASK;
8131
8132 /*
8133 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8134 * ugly stuff to avoid this. This will make sure it's an atomic access
8135     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8136 */
8137 VBOXSTRICTRC rcStrict;
8138 uint8_t bUnmapInfo;
8139 uint32_t volatile *pu32;
8140 if ((GCPtr & 3) == 0)
8141 {
8142 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8143 GCPtr += 2 + 2;
8144 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8145 if (rcStrict != VINF_SUCCESS)
8146 return rcStrict;
8147        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8148 }
8149 else
8150 {
8151 /* The misaligned GDT/LDT case, map the whole thing. */
8152 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8153 if (rcStrict != VINF_SUCCESS)
8154 return rcStrict;
8155 switch ((uintptr_t)pu32 & 3)
8156 {
8157 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8158 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8159 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8160 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8161 }
8162 }
8163
8164 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8165}
8166
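/*
 * Worked bit arithmetic for the code above (informational comment only): the
 * accessed flag is bit 40 of the 8-byte descriptor.  In the aligned case the
 * mapping starts at descriptor offset 4, leaving bit 40 - 32 = 8.  In the
 * misaligned case the host pointer is advanced by 3, 2 or 1 bytes to reach a
 * 32-bit boundary, so the bit index becomes 40 - 24, 40 - 16 or 40 - 8
 * respectively.
 */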
8167
8168#undef LOG_GROUP
8169#define LOG_GROUP LOG_GROUP_IEM
8170
8171/** @} */
8172
8173/** @name Opcode Helpers.
8174 * @{
8175 */
8176
8177/**
8178 * Calculates the effective address of a ModR/M memory operand.
8179 *
8180 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8181 *
8182 * @return Strict VBox status code.
8183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8184 * @param bRm The ModRM byte.
8185 * @param cbImmAndRspOffset - First byte: The size of any immediate
8186 * following the effective address opcode bytes
8187 * (only for RIP relative addressing).
8188 * - Second byte: RSP displacement (for POP [ESP]).
8189 * @param pGCPtrEff Where to return the effective address.
8190 */
8191VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8192{
8193 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8194# define SET_SS_DEF() \
8195 do \
8196 { \
8197 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8198 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8199 } while (0)
8200
8201 if (!IEM_IS_64BIT_CODE(pVCpu))
8202 {
8203/** @todo Check the effective address size crap! */
8204 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8205 {
8206 uint16_t u16EffAddr;
8207
8208 /* Handle the disp16 form with no registers first. */
8209 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8210 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8211 else
8212 {
8213                /* Get the displacement. */
8214 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8215 {
8216 case 0: u16EffAddr = 0; break;
8217 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8218 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8219 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8220 }
8221
8222 /* Add the base and index registers to the disp. */
8223 switch (bRm & X86_MODRM_RM_MASK)
8224 {
8225 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8226 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8227 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8228 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8229 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8230 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8231 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8232 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8233 }
8234 }
8235
8236 *pGCPtrEff = u16EffAddr;
8237 }
8238 else
8239 {
8240 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8241 uint32_t u32EffAddr;
8242
8243 /* Handle the disp32 form with no registers first. */
8244 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8245 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8246 else
8247 {
8248 /* Get the register (or SIB) value. */
8249 switch ((bRm & X86_MODRM_RM_MASK))
8250 {
8251 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8252 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8253 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8254 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8255 case 4: /* SIB */
8256 {
8257 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8258
8259 /* Get the index and scale it. */
8260 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8261 {
8262 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8263 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8264 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8265 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8266 case 4: u32EffAddr = 0; /*none */ break;
8267 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8268 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8269 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8270 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8271 }
8272 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8273
8274 /* add base */
8275 switch (bSib & X86_SIB_BASE_MASK)
8276 {
8277 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8278 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8279 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8280 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8281 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8282 case 5:
8283 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8284 {
8285 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8286 SET_SS_DEF();
8287 }
8288 else
8289 {
8290 uint32_t u32Disp;
8291 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8292 u32EffAddr += u32Disp;
8293 }
8294 break;
8295 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8296 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8298 }
8299 break;
8300 }
8301 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8302 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8303 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8304 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8305 }
8306
8307 /* Get and add the displacement. */
8308 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8309 {
8310 case 0:
8311 break;
8312 case 1:
8313 {
8314 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8315 u32EffAddr += i8Disp;
8316 break;
8317 }
8318 case 2:
8319 {
8320 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8321 u32EffAddr += u32Disp;
8322 break;
8323 }
8324 default:
8325 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8326 }
8327
8328 }
8329 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8330 *pGCPtrEff = u32EffAddr;
8331 }
8332 }
8333 else
8334 {
8335 uint64_t u64EffAddr;
8336
8337 /* Handle the rip+disp32 form with no registers first. */
8338 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8339 {
8340 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8341 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8342 }
8343 else
8344 {
8345 /* Get the register (or SIB) value. */
8346 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8347 {
8348 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8349 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8350 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8351 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8352 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8353 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8354 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8355 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8356 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8357 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8358 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8359 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8360 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8361 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8362 /* SIB */
8363 case 4:
8364 case 12:
8365 {
8366 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8367
8368 /* Get the index and scale it. */
8369 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8370 {
8371 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8372 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8373 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8374 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8375 case 4: u64EffAddr = 0; /*none */ break;
8376 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8377 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8378 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8379 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8380 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8381 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8382 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8383 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8384 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8385 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8386 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8387 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8388 }
8389 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8390
8391 /* add base */
8392 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8393 {
8394 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8395 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8396 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8397 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8398 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8399 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8400 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8401 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8402 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8403 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8404 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8405 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8406 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8407 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8408 /* complicated encodings */
8409 case 5:
8410 case 13:
8411 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8412 {
8413 if (!pVCpu->iem.s.uRexB)
8414 {
8415 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8416 SET_SS_DEF();
8417 }
8418 else
8419 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8420 }
8421 else
8422 {
8423 uint32_t u32Disp;
8424 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8425 u64EffAddr += (int32_t)u32Disp;
8426 }
8427 break;
8428 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8429 }
8430 break;
8431 }
8432 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8433 }
8434
8435 /* Get and add the displacement. */
8436 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8437 {
8438 case 0:
8439 break;
8440 case 1:
8441 {
8442 int8_t i8Disp;
8443 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8444 u64EffAddr += i8Disp;
8445 break;
8446 }
8447 case 2:
8448 {
8449 uint32_t u32Disp;
8450 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8451 u64EffAddr += (int32_t)u32Disp;
8452 break;
8453 }
8454 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8455 }
8456
8457 }
8458
8459 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8460 *pGCPtrEff = u64EffAddr;
8461 else
8462 {
8463 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8464 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8465 }
8466 }
8467
8468 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8469 return VINF_SUCCESS;
8470}
8471
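/*
 * Worked decode example for the function above (informational comment only):
 * in 32-bit code, bRm = 0x44 gives mod = 1 and r/m = 4, so a SIB byte follows;
 * SIB = 0x24 selects no index and ESP as base, so the result is
 * ESP + (cbImmAndRspOffset >> 8) + disp8 with SS as the default segment
 * (SET_SS_DEF).
 */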
8472
8473#ifdef IEM_WITH_SETJMP
8474/**
8475 * Calculates the effective address of a ModR/M memory operand.
8476 *
8477 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8478 *
8479 * May longjmp on internal error.
8480 *
8481 * @return The effective address.
8482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8483 * @param bRm The ModRM byte.
8484 * @param cbImmAndRspOffset - First byte: The size of any immediate
8485 * following the effective address opcode bytes
8486 * (only for RIP relative addressing).
8487 * - Second byte: RSP displacement (for POP [ESP]).
8488 */
8489RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8490{
8491 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8492# define SET_SS_DEF() \
8493 do \
8494 { \
8495 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8496 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8497 } while (0)
8498
8499 if (!IEM_IS_64BIT_CODE(pVCpu))
8500 {
8501/** @todo Check the effective address size crap! */
8502 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8503 {
8504 uint16_t u16EffAddr;
8505
8506 /* Handle the disp16 form with no registers first. */
8507 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8508 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8509 else
8510 {
8511                /* Get the displacement. */
8512 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8513 {
8514 case 0: u16EffAddr = 0; break;
8515 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8516 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8517 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8518 }
8519
8520 /* Add the base and index registers to the disp. */
8521 switch (bRm & X86_MODRM_RM_MASK)
8522 {
8523 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8524 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8525 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8526 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8527 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8528 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8529 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8530 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8531 }
8532 }
8533
8534 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8535 return u16EffAddr;
8536 }
8537
8538 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8539 uint32_t u32EffAddr;
8540
8541 /* Handle the disp32 form with no registers first. */
8542 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8543 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8544 else
8545 {
8546 /* Get the register (or SIB) value. */
8547 switch ((bRm & X86_MODRM_RM_MASK))
8548 {
8549 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8550 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8551 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8552 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8553 case 4: /* SIB */
8554 {
8555 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8556
8557 /* Get the index and scale it. */
8558 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8559 {
8560 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8561 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8562 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8563 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8564 case 4: u32EffAddr = 0; /*none */ break;
8565 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8566 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8567 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8568 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8569 }
8570 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8571
8572 /* add base */
8573 switch (bSib & X86_SIB_BASE_MASK)
8574 {
8575 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8576 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8577 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8578 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8579 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8580 case 5:
8581 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8582 {
8583 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8584 SET_SS_DEF();
8585 }
8586 else
8587 {
8588 uint32_t u32Disp;
8589 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8590 u32EffAddr += u32Disp;
8591 }
8592 break;
8593 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8594 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8595 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8596 }
8597 break;
8598 }
8599 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8600 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8601 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8602 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8603 }
8604
8605 /* Get and add the displacement. */
8606 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8607 {
8608 case 0:
8609 break;
8610 case 1:
8611 {
8612 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8613 u32EffAddr += i8Disp;
8614 break;
8615 }
8616 case 2:
8617 {
8618 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8619 u32EffAddr += u32Disp;
8620 break;
8621 }
8622 default:
8623 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8624 }
8625 }
8626
8627 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8628 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8629 return u32EffAddr;
8630 }
8631
8632 uint64_t u64EffAddr;
8633
8634 /* Handle the rip+disp32 form with no registers first. */
8635 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8636 {
8637 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8638 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8639 }
8640 else
8641 {
8642 /* Get the register (or SIB) value. */
8643 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8644 {
8645 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8646 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8647 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8648 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8649 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8650 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8651 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8652 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8653 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8654 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8655 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8656 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8657 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8658 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8659 /* SIB */
8660 case 4:
8661 case 12:
8662 {
8663 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8664
8665 /* Get the index and scale it. */
8666 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8667 {
8668 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8669 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8670 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8671 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8672 case 4: u64EffAddr = 0; /*none */ break;
8673 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8674 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8675 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8676 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8677 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8678 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8679 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8680 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8681 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8682 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8683 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8684 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8685 }
8686 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8687
8688 /* add base */
8689 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8690 {
8691 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8692 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8693 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8694 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8695 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8696 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8697 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8698 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8699 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8700 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8701 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8702 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8703 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8704 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8705 /* complicated encodings */
8706 case 5:
8707 case 13:
8708 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8709 {
8710 if (!pVCpu->iem.s.uRexB)
8711 {
8712 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8713 SET_SS_DEF();
8714 }
8715 else
8716 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8717 }
8718 else
8719 {
8720 uint32_t u32Disp;
8721 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8722 u64EffAddr += (int32_t)u32Disp;
8723 }
8724 break;
8725 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8726 }
8727 break;
8728 }
8729 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8730 }
8731
8732 /* Get and add the displacement. */
8733 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8734 {
8735 case 0:
8736 break;
8737 case 1:
8738 {
8739 int8_t i8Disp;
8740 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8741 u64EffAddr += i8Disp;
8742 break;
8743 }
8744 case 2:
8745 {
8746 uint32_t u32Disp;
8747 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8748 u64EffAddr += (int32_t)u32Disp;
8749 break;
8750 }
8751 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8752 }
8753
8754 }
8755
8756 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8757 {
8758 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8759 return u64EffAddr;
8760 }
8761 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8762 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8763 return u64EffAddr & UINT32_MAX;
8764}
8765#endif /* IEM_WITH_SETJMP */
8766
8767
8768/**
8769 * Calculates the effective address of a ModR/M memory operand, extended version
8770 * for use in the recompilers.
8771 *
8772 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8773 *
8774 * @return Strict VBox status code.
8775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8776 * @param bRm The ModRM byte.
8777 * @param cbImmAndRspOffset - First byte: The size of any immediate
8778 * following the effective address opcode bytes
8779 * (only for RIP relative addressing).
8780 * - Second byte: RSP displacement (for POP [ESP]).
8781 * @param pGCPtrEff Where to return the effective address.
8782 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8783 * SIB byte (bits 39:32).
8784 */
8785VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8786{
8787 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8788# define SET_SS_DEF() \
8789 do \
8790 { \
8791 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8792 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8793 } while (0)
8794
8795 uint64_t uInfo;
8796 if (!IEM_IS_64BIT_CODE(pVCpu))
8797 {
8798/** @todo Check the effective address size crap! */
8799 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8800 {
8801 uint16_t u16EffAddr;
8802
8803 /* Handle the disp16 form with no registers first. */
8804 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8805 {
8806 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8807 uInfo = u16EffAddr;
8808 }
8809 else
8810 {
8811                /* Get the displacement. */
8812 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8813 {
8814 case 0: u16EffAddr = 0; break;
8815 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8816 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8817 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8818 }
8819 uInfo = u16EffAddr;
8820
8821 /* Add the base and index registers to the disp. */
8822 switch (bRm & X86_MODRM_RM_MASK)
8823 {
8824 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8825 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8826 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8827 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8828 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8829 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8830 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8831 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8832 }
8833 }
8834
8835 *pGCPtrEff = u16EffAddr;
8836 }
8837 else
8838 {
8839 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8840 uint32_t u32EffAddr;
8841
8842 /* Handle the disp32 form with no registers first. */
8843 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8844 {
8845 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8846 uInfo = u32EffAddr;
8847 }
8848 else
8849 {
8850 /* Get the register (or SIB) value. */
8851 uInfo = 0;
8852 switch ((bRm & X86_MODRM_RM_MASK))
8853 {
8854 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8855 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8856 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8857 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8858 case 4: /* SIB */
8859 {
8860 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8861 uInfo = (uint64_t)bSib << 32;
8862
8863 /* Get the index and scale it. */
8864 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8865 {
8866 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8867 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8868 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8869 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8870 case 4: u32EffAddr = 0; /*none */ break;
8871 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8872 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8873 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8874 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8875 }
8876 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8877
8878 /* add base */
8879 switch (bSib & X86_SIB_BASE_MASK)
8880 {
8881 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8882 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8883 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8884 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8885 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8886 case 5:
8887 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8888 {
8889 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8890 SET_SS_DEF();
8891 }
8892 else
8893 {
8894 uint32_t u32Disp;
8895 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8896 u32EffAddr += u32Disp;
8897 uInfo |= u32Disp;
8898 }
8899 break;
8900 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8901 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8902 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8903 }
8904 break;
8905 }
8906 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8907 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8908 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8909 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8910 }
8911
8912 /* Get and add the displacement. */
8913 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8914 {
8915 case 0:
8916 break;
8917 case 1:
8918 {
8919 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8920 u32EffAddr += i8Disp;
8921 uInfo |= (uint32_t)(int32_t)i8Disp;
8922 break;
8923 }
8924 case 2:
8925 {
8926 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8927 u32EffAddr += u32Disp;
8928 uInfo |= (uint32_t)u32Disp;
8929 break;
8930 }
8931 default:
8932 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8933 }
8934
8935 }
8936 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8937 *pGCPtrEff = u32EffAddr;
8938 }
8939 }
8940 else
8941 {
8942 uint64_t u64EffAddr;
8943
8944 /* Handle the rip+disp32 form with no registers first. */
8945 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8946 {
8947 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8948 uInfo = (uint32_t)u64EffAddr;
8949 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8950 }
8951 else
8952 {
8953 /* Get the register (or SIB) value. */
8954 uInfo = 0;
8955 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8956 {
8957 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8958 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8959 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8960 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8961 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8962 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8963 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8964 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8965 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8966 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8967 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8968 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8969 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8970 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8971 /* SIB */
8972 case 4:
8973 case 12:
8974 {
8975 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8976 uInfo = (uint64_t)bSib << 32;
8977
8978 /* Get the index and scale it. */
8979 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8980 {
8981 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8982 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8983 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8984 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8985 case 4: u64EffAddr = 0; /*none */ break;
8986 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8987 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8988 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8989 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8990 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8991 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8992 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8993 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8994 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8995 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8996 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8998 }
8999 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9000
9001 /* add base */
9002 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9003 {
9004 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9005 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9006 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9007 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9008 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9009 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9010 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9011 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9012 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9013 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9014 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9015 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9016 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9017 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9018 /* complicated encodings */
9019 case 5:
9020 case 13:
9021 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9022 {
9023 if (!pVCpu->iem.s.uRexB)
9024 {
9025 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9026 SET_SS_DEF();
9027 }
9028 else
9029 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9030 }
9031 else
9032 {
9033 uint32_t u32Disp;
9034 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9035 u64EffAddr += (int32_t)u32Disp;
9036 uInfo |= u32Disp;
9037 }
9038 break;
9039 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9040 }
9041 break;
9042 }
9043 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9044 }
9045
9046 /* Get and add the displacement. */
9047 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9048 {
9049 case 0:
9050 break;
9051 case 1:
9052 {
9053 int8_t i8Disp;
9054 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9055 u64EffAddr += i8Disp;
9056 uInfo |= (uint32_t)(int32_t)i8Disp;
9057 break;
9058 }
9059 case 2:
9060 {
9061 uint32_t u32Disp;
9062 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9063 u64EffAddr += (int32_t)u32Disp;
9064 uInfo |= u32Disp;
9065 break;
9066 }
9067 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9068 }
9069
9070 }
9071
9072 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9073 *pGCPtrEff = u64EffAddr;
9074 else
9075 {
9076 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9077 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9078 }
9079 }
9080 *puInfo = uInfo;
9081
9082 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9083 return VINF_SUCCESS;
9084}
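/*
 * A minimal usage sketch for iemOpHlpCalcRmEffAddrEx. The caller context is
 * hypothetical (cbImm, cbRspOffset and what is done with the results are
 * illustrative only); it merely demonstrates the cbImmAndRspOffset and puInfo
 * packing documented above.
 *
 *      RTGCPTR      GCPtrEff;
 *      uint64_t     uInfo;
 *      // Low byte: size of any immediate following the ModR/M bytes (used for
 *      // RIP-relative addressing); second byte: extra RSP displacement.
 *      uint32_t const cbImmAndRspOffset = (uint32_t)cbImm | ((uint32_t)cbRspOffset << 8);
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;           // displacement, bits 31:0
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);    // SIB byte, bits 39:32
 *          // ... hand GCPtrEff / u32Disp / bSib to the recompiler ...
 *      }
 */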
9085
9086/** @} */
9087
9088
9089#ifdef LOG_ENABLED
9090/**
9091 * Logs the current instruction.
9092 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9093 * @param fSameCtx Set if we have the same context information as the VMM,
9094 * clear if we may have already executed an instruction in
9095 * our debug context. When clear, we assume IEMCPU holds
9096 * valid CPU mode info.
9097 *
9098 * The @a fSameCtx parameter is now misleading and obsolete.
9099 * @param pszFunction The IEM function doing the execution.
9100 */
9101static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9102{
9103# ifdef IN_RING3
9104 if (LogIs2Enabled())
9105 {
9106 char szInstr[256];
9107 uint32_t cbInstr = 0;
9108 if (fSameCtx)
9109 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9110 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9111 szInstr, sizeof(szInstr), &cbInstr);
9112 else
9113 {
9114 uint32_t fFlags = 0;
9115 switch (IEM_GET_CPU_MODE(pVCpu))
9116 {
9117 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9118 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9119 case IEMMODE_16BIT:
9120 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9121 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9122 else
9123 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9124 break;
9125 }
9126 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9127 szInstr, sizeof(szInstr), &cbInstr);
9128 }
9129
9130 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9131 Log2(("**** %s fExec=%x\n"
9132 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9133 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9134 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9135 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9136 " %s\n"
9137 , pszFunction, pVCpu->iem.s.fExec,
9138 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9139 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9140 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9141 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9142 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9143 szInstr));
9144
9145 /* This stuff sucks atm. as it fills the log with MSRs. */
9146 //if (LogIs3Enabled())
9147 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9148 }
9149 else
9150# endif
9151 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9152 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9153 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9154}
9155#endif /* LOG_ENABLED */
9156
9157
9158#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9159/**
9160 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9161 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9162 *
9163 * @returns Modified rcStrict.
9164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9165 * @param rcStrict The instruction execution status.
9166 */
9167static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9168{
9169 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9170 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9171 {
9172 /* VMX preemption timer takes priority over NMI-window exits. */
9173 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9174 {
9175 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9176 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9177 }
9178 /*
9179 * Check remaining intercepts.
9180 *
9181 * NMI-window and Interrupt-window VM-exits.
9182 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9183 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9184 *
9185 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9186 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9187 */
9188 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9189 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9190 && !TRPMHasTrap(pVCpu))
9191 {
9192 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9193 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9194 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9195 {
9196 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9197 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9198 }
9199 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9200 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9201 {
9202 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9203 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9204 }
9205 }
9206 }
9207 /* TPR-below threshold/APIC write has the highest priority. */
9208 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9209 {
9210 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9211 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9212 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9213 }
9214 /* MTF takes priority over VMX-preemption timer. */
9215 else
9216 {
9217 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9218 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9220 }
9221 return rcStrict;
9222}
9223#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9224
9225
9226/**
9227 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9228 * IEMExecOneWithPrefetchedByPC.
9229 *
9230 * Similar code is found in IEMExecLots.
9231 *
9232 * @return Strict VBox status code.
9233 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9234 * @param fExecuteInhibit If set, execute the instruction following CLI,
9235 * POP SS and MOV SS,GR.
9236 * @param pszFunction The calling function name.
9237 */
9238DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9239{
9240 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9241 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9242 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9243 RT_NOREF_PV(pszFunction);
9244
9245#ifdef IEM_WITH_SETJMP
9246 VBOXSTRICTRC rcStrict;
9247 IEM_TRY_SETJMP(pVCpu, rcStrict)
9248 {
9249 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9250 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9251 }
9252 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9253 {
9254 pVCpu->iem.s.cLongJumps++;
9255 }
9256 IEM_CATCH_LONGJMP_END(pVCpu);
9257#else
9258 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9259 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9260#endif
9261 if (rcStrict == VINF_SUCCESS)
9262 pVCpu->iem.s.cInstructions++;
9263 if (pVCpu->iem.s.cActiveMappings > 0)
9264 {
9265 Assert(rcStrict != VINF_SUCCESS);
9266 iemMemRollback(pVCpu);
9267 }
9268 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9269 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9270 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9271
9272//#ifdef DEBUG
9273// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9274//#endif
9275
9276#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9277 /*
9278 * Perform any VMX nested-guest instruction boundary actions.
9279 *
9280 * If any of these causes a VM-exit, we must skip executing the next
9281 * instruction (would run into stale page tables). A VM-exit makes sure
9282 * there is no interrupt-inhibition, so that should ensure we don't go
 9283     * on to try executing the next instruction. Clearing fExecuteInhibit is
9284 * problematic because of the setjmp/longjmp clobbering above.
9285 */
9286 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9287 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9288 || rcStrict != VINF_SUCCESS)
9289 { /* likely */ }
9290 else
9291 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9292#endif
9293
9294 /* Execute the next instruction as well if a cli, pop ss or
9295 mov ss, Gr has just completed successfully. */
9296 if ( fExecuteInhibit
9297 && rcStrict == VINF_SUCCESS
9298 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9299 {
9300 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9301 if (rcStrict == VINF_SUCCESS)
9302 {
9303#ifdef LOG_ENABLED
9304 iemLogCurInstr(pVCpu, false, pszFunction);
9305#endif
9306#ifdef IEM_WITH_SETJMP
9307 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9308 {
9309 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9310 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9311 }
9312 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9313 {
9314 pVCpu->iem.s.cLongJumps++;
9315 }
9316 IEM_CATCH_LONGJMP_END(pVCpu);
9317#else
9318 IEM_OPCODE_GET_FIRST_U8(&b);
9319 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9320#endif
9321 if (rcStrict == VINF_SUCCESS)
9322 {
9323 pVCpu->iem.s.cInstructions++;
9324#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9325 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9326 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9327 { /* likely */ }
9328 else
9329 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9330#endif
9331 }
9332 if (pVCpu->iem.s.cActiveMappings > 0)
9333 {
9334 Assert(rcStrict != VINF_SUCCESS);
9335 iemMemRollback(pVCpu);
9336 }
9337 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9338 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9339 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9340 }
9341 else if (pVCpu->iem.s.cActiveMappings > 0)
9342 iemMemRollback(pVCpu);
9343 /** @todo drop this after we bake this change into RIP advancing. */
9344 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9345 }
9346
9347 /*
9348 * Return value fiddling, statistics and sanity assertions.
9349 */
9350 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9351
9352 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9353 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9354 return rcStrict;
9355}
9356
9357
9358/**
9359 * Execute one instruction.
9360 *
9361 * @return Strict VBox status code.
9362 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9363 */
9364VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9365{
 9366    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9367#ifdef LOG_ENABLED
9368 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9369#endif
9370
9371 /*
9372 * Do the decoding and emulation.
9373 */
9374 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9375 if (rcStrict == VINF_SUCCESS)
9376 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9377 else if (pVCpu->iem.s.cActiveMappings > 0)
9378 iemMemRollback(pVCpu);
9379
9380 if (rcStrict != VINF_SUCCESS)
9381 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9382 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9383 return rcStrict;
9384}
9385
9386
9387VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9388{
9389 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9390 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9391 if (rcStrict == VINF_SUCCESS)
9392 {
9393 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9394 if (pcbWritten)
9395 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9396 }
9397 else if (pVCpu->iem.s.cActiveMappings > 0)
9398 iemMemRollback(pVCpu);
9399
9400 return rcStrict;
9401}
9402
9403
9404VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9405 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9406{
9407 VBOXSTRICTRC rcStrict;
9408 if ( cbOpcodeBytes
9409 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9410 {
9411 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9412#ifdef IEM_WITH_CODE_TLB
9413 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9414 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9415 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9416 pVCpu->iem.s.offCurInstrStart = 0;
9417 pVCpu->iem.s.offInstrNextByte = 0;
9418 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9419#else
9420 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9421 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9422#endif
9423 rcStrict = VINF_SUCCESS;
9424 }
9425 else
9426 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9427 if (rcStrict == VINF_SUCCESS)
9428 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9429 else if (pVCpu->iem.s.cActiveMappings > 0)
9430 iemMemRollback(pVCpu);
9431
9432 return rcStrict;
9433}
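/*
 * A minimal usage sketch for IEMExecOneWithPrefetchedByPC, assuming the caller
 * (e.g. an exit handler) has already fetched the opcode bytes at the current
 * RIP; the buffer contents below are illustrative:
 *
 *      uint8_t const abOpcode[] = { 0x90 }; // NOP at the current RIP
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           abOpcode, sizeof(abOpcode));
 *      // The prefetched bytes are only used when OpcodeBytesPC matches the
 *      // guest RIP; otherwise the function falls back to a normal prefetch.
 */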
9434
9435
9436VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9437{
9438 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9439 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9440 if (rcStrict == VINF_SUCCESS)
9441 {
9442 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9443 if (pcbWritten)
9444 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9445 }
9446 else if (pVCpu->iem.s.cActiveMappings > 0)
9447 iemMemRollback(pVCpu);
9448
9449 return rcStrict;
9450}
9451
9452
9453VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9454 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9455{
9456 VBOXSTRICTRC rcStrict;
9457 if ( cbOpcodeBytes
9458 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9459 {
9460 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9461#ifdef IEM_WITH_CODE_TLB
9462 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9463 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9464 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9465 pVCpu->iem.s.offCurInstrStart = 0;
9466 pVCpu->iem.s.offInstrNextByte = 0;
9467 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9468#else
9469 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9470 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9471#endif
9472 rcStrict = VINF_SUCCESS;
9473 }
9474 else
9475 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9476 if (rcStrict == VINF_SUCCESS)
9477 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9478 else if (pVCpu->iem.s.cActiveMappings > 0)
9479 iemMemRollback(pVCpu);
9480
9481 return rcStrict;
9482}
9483
9484
9485/**
9486 * For handling split cacheline lock operations when the host has split-lock
9487 * detection enabled.
9488 *
9489 * This will cause the interpreter to disregard the lock prefix and implicit
9490 * locking (xchg).
9491 *
9492 * @returns Strict VBox status code.
9493 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9494 */
9495VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9496{
9497 /*
9498 * Do the decoding and emulation.
9499 */
9500 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9501 if (rcStrict == VINF_SUCCESS)
9502 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9503 else if (pVCpu->iem.s.cActiveMappings > 0)
9504 iemMemRollback(pVCpu);
9505
9506 if (rcStrict != VINF_SUCCESS)
9507 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9508 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9509 return rcStrict;
9510}
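/*
 * A minimal usage sketch for IEMExecOneIgnoreLock, assuming the caller has
 * already established that the current guest instruction tripped the host's
 * split-lock detection (that detection path is outside this file):
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 *      // The instruction is emulated with the lock prefix / implicit XCHG
 *      // locking disregarded; rcStrict is handled like any IEMExecOne status.
 */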
9511
9512
9513/**
9514 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9515 * inject a pending TRPM trap.
9516 */
9517VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9518{
9519 Assert(TRPMHasTrap(pVCpu));
9520
9521 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9522 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9523 {
9524 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9525#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9526 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9527 if (fIntrEnabled)
9528 {
9529 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9530 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9531 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9532 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9533 else
9534 {
9535 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9536 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9537 }
9538 }
9539#else
9540 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9541#endif
9542 if (fIntrEnabled)
9543 {
9544 uint8_t u8TrapNo;
9545 TRPMEVENT enmType;
9546 uint32_t uErrCode;
9547 RTGCPTR uCr2;
9548 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9549 AssertRC(rc2);
9550 Assert(enmType == TRPM_HARDWARE_INT);
9551 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9552
9553 TRPMResetTrap(pVCpu);
9554
9555#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9556 /* Injecting an event may cause a VM-exit. */
9557 if ( rcStrict != VINF_SUCCESS
9558 && rcStrict != VINF_IEM_RAISED_XCPT)
9559 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9560#else
9561 NOREF(rcStrict);
9562#endif
9563 }
9564 }
9565
9566 return VINF_SUCCESS;
9567}
9568
9569
9570VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9571{
9572 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9573 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9574 Assert(cMaxInstructions > 0);
9575
9576 /*
9577 * See if there is an interrupt pending in TRPM, inject it if we can.
9578 */
9579 /** @todo What if we are injecting an exception and not an interrupt? Is that
9580 * possible here? For now we assert it is indeed only an interrupt. */
9581 if (!TRPMHasTrap(pVCpu))
9582 { /* likely */ }
9583 else
9584 {
9585 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9586 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9587 { /*likely */ }
9588 else
9589 return rcStrict;
9590 }
9591
9592 /*
 9593     * Initial decoder init w/ prefetch, then set up setjmp.
9594 */
9595 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9596 if (rcStrict == VINF_SUCCESS)
9597 {
9598#ifdef IEM_WITH_SETJMP
9599 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9600 IEM_TRY_SETJMP(pVCpu, rcStrict)
9601#endif
9602 {
9603 /*
 9604             * The run loop. We limit ourselves to the caller's cMaxInstructions budget.
9605 */
9606 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9607 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9608 for (;;)
9609 {
9610 /*
9611 * Log the state.
9612 */
9613#ifdef LOG_ENABLED
9614 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9615#endif
9616
9617 /*
9618 * Do the decoding and emulation.
9619 */
9620 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9621 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9622#ifdef VBOX_STRICT
9623 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9624#endif
9625 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9626 {
9627 Assert(pVCpu->iem.s.cActiveMappings == 0);
9628 pVCpu->iem.s.cInstructions++;
9629
9630#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9631 /* Perform any VMX nested-guest instruction boundary actions. */
9632 uint64_t fCpu = pVCpu->fLocalForcedActions;
9633 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9634 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9635 { /* likely */ }
9636 else
9637 {
9638 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9639 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9640 fCpu = pVCpu->fLocalForcedActions;
9641 else
9642 {
9643 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9644 break;
9645 }
9646 }
9647#endif
9648 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9649 {
9650#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9651 uint64_t fCpu = pVCpu->fLocalForcedActions;
9652#endif
9653 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9654 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9655 | VMCPU_FF_TLB_FLUSH
9656 | VMCPU_FF_UNHALT );
9657
9658 if (RT_LIKELY( ( !fCpu
9659 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9660 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9661 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9662 {
9663 if (--cMaxInstructionsGccStupidity > 0)
9664 {
 9665                                /* Poll timers every now and then according to the caller's specs. */
9666 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9667 || !TMTimerPollBool(pVM, pVCpu))
9668 {
9669 Assert(pVCpu->iem.s.cActiveMappings == 0);
9670 iemReInitDecoder(pVCpu);
9671 continue;
9672 }
9673 }
9674 }
9675 }
9676 Assert(pVCpu->iem.s.cActiveMappings == 0);
9677 }
9678 else if (pVCpu->iem.s.cActiveMappings > 0)
9679 iemMemRollback(pVCpu);
9680 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9681 break;
9682 }
9683 }
9684#ifdef IEM_WITH_SETJMP
9685 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9686 {
9687 if (pVCpu->iem.s.cActiveMappings > 0)
9688 iemMemRollback(pVCpu);
9689# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9690 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9691# endif
9692 pVCpu->iem.s.cLongJumps++;
9693 }
9694 IEM_CATCH_LONGJMP_END(pVCpu);
9695#endif
9696
9697 /*
9698 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9699 */
9700 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9701 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9702 }
9703 else
9704 {
9705 if (pVCpu->iem.s.cActiveMappings > 0)
9706 iemMemRollback(pVCpu);
9707
9708#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9709 /*
9710 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9711 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9712 */
9713 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9714#endif
9715 }
9716
9717 /*
9718 * Maybe re-enter raw-mode and log.
9719 */
9720 if (rcStrict != VINF_SUCCESS)
9721 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9722 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9723 if (pcInstructions)
9724 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9725 return rcStrict;
9726}
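/*
 * A minimal usage sketch for IEMExecLots; the instruction budget is
 * illustrative. Note the assertion at the top of the function: cPollRate + 1
 * must be a power of two, so cPollRate acts as a mask and timers get polled
 * roughly every cPollRate + 1 instructions.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 *      // 4096 = cMaxInstructions, 511 = cPollRate; cInstructions returns how
 *      // many instructions were actually executed in this call.
 */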
9727
9728
9729/**
9730 * Interface used by EMExecuteExec, does exit statistics and limits.
9731 *
9732 * @returns Strict VBox status code.
9733 * @param pVCpu The cross context virtual CPU structure.
9734 * @param fWillExit To be defined.
9735 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9736 * @param cMaxInstructions Maximum number of instructions to execute.
9737 * @param cMaxInstructionsWithoutExits
9738 * The max number of instructions without exits.
9739 * @param pStats Where to return statistics.
9740 */
9741VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9742 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9743{
9744 NOREF(fWillExit); /** @todo define flexible exit crits */
9745
9746 /*
9747 * Initialize return stats.
9748 */
9749 pStats->cInstructions = 0;
9750 pStats->cExits = 0;
9751 pStats->cMaxExitDistance = 0;
9752 pStats->cReserved = 0;
9753
9754 /*
 9755     * Initial decoder init w/ prefetch, then set up setjmp.
9756 */
9757 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9758 if (rcStrict == VINF_SUCCESS)
9759 {
9760#ifdef IEM_WITH_SETJMP
9761 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9762 IEM_TRY_SETJMP(pVCpu, rcStrict)
9763#endif
9764 {
9765#ifdef IN_RING0
9766 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9767#endif
9768 uint32_t cInstructionSinceLastExit = 0;
9769
9770 /*
 9771             * The run loop. We limit ourselves to the caller's cMaxInstructions / cMaxInstructionsWithoutExits budgets.
9772 */
9773 PVM pVM = pVCpu->CTX_SUFF(pVM);
9774 for (;;)
9775 {
9776 /*
9777 * Log the state.
9778 */
9779#ifdef LOG_ENABLED
9780 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9781#endif
9782
9783 /*
9784 * Do the decoding and emulation.
9785 */
9786 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9787
9788 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9789 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9790
9791 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9792 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9793 {
9794 pStats->cExits += 1;
9795 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9796 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9797 cInstructionSinceLastExit = 0;
9798 }
9799
9800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9801 {
9802 Assert(pVCpu->iem.s.cActiveMappings == 0);
9803 pVCpu->iem.s.cInstructions++;
9804 pStats->cInstructions++;
9805 cInstructionSinceLastExit++;
9806
9807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9808 /* Perform any VMX nested-guest instruction boundary actions. */
9809 uint64_t fCpu = pVCpu->fLocalForcedActions;
9810 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9811 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9812 { /* likely */ }
9813 else
9814 {
9815 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9816 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9817 fCpu = pVCpu->fLocalForcedActions;
9818 else
9819 {
9820 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9821 break;
9822 }
9823 }
9824#endif
9825 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9826 {
9827#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9828 uint64_t fCpu = pVCpu->fLocalForcedActions;
9829#endif
9830 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9831 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9832 | VMCPU_FF_TLB_FLUSH
9833 | VMCPU_FF_UNHALT );
9834 if (RT_LIKELY( ( ( !fCpu
9835 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9836 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9837 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9838 || pStats->cInstructions < cMinInstructions))
9839 {
9840 if (pStats->cInstructions < cMaxInstructions)
9841 {
9842 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9843 {
9844#ifdef IN_RING0
9845 if ( !fCheckPreemptionPending
9846 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9847#endif
9848 {
9849 Assert(pVCpu->iem.s.cActiveMappings == 0);
9850 iemReInitDecoder(pVCpu);
9851 continue;
9852 }
9853#ifdef IN_RING0
9854 rcStrict = VINF_EM_RAW_INTERRUPT;
9855 break;
9856#endif
9857 }
9858 }
9859 }
9860 Assert(!(fCpu & VMCPU_FF_IEM));
9861 }
9862 Assert(pVCpu->iem.s.cActiveMappings == 0);
9863 }
9864 else if (pVCpu->iem.s.cActiveMappings > 0)
9865 iemMemRollback(pVCpu);
9866 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9867 break;
9868 }
9869 }
9870#ifdef IEM_WITH_SETJMP
9871 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9872 {
9873 if (pVCpu->iem.s.cActiveMappings > 0)
9874 iemMemRollback(pVCpu);
9875 pVCpu->iem.s.cLongJumps++;
9876 }
9877 IEM_CATCH_LONGJMP_END(pVCpu);
9878#endif
9879
9880 /*
9881 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9882 */
9883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9885 }
9886 else
9887 {
9888 if (pVCpu->iem.s.cActiveMappings > 0)
9889 iemMemRollback(pVCpu);
9890
9891#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9892 /*
9893 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9894 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9895 */
9896 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9897#endif
9898 }
9899
9900 /*
9901 * Maybe re-enter raw-mode and log.
9902 */
9903 if (rcStrict != VINF_SUCCESS)
9904 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9905 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9906 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9907 return rcStrict;
9908}
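/*
 * A minimal usage sketch for IEMExecForExits; the limits are illustrative and
 * fWillExit is currently unused by the function (see the @todo above):
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 2048, &Stats);
 *      // args: fWillExit=0, cMinInstructions=1, cMaxInstructions=4096,
 *      //       cMaxInstructionsWithoutExits=2048.
 *      // Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance are
 *      // filled in on return.
 */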
9909
9910
9911/**
9912 * Injects a trap, fault, abort, software interrupt or external interrupt.
9913 *
9914 * The parameter list matches TRPMQueryTrapAll pretty closely.
9915 *
9916 * @returns Strict VBox status code.
9917 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9918 * @param u8TrapNo The trap number.
9919 * @param enmType What type is it (trap/fault/abort), software
9920 * interrupt or hardware interrupt.
9921 * @param uErrCode The error code if applicable.
9922 * @param uCr2 The CR2 value if applicable.
9923 * @param cbInstr The instruction length (only relevant for
9924 * software interrupts).
9925 */
9926VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9927 uint8_t cbInstr)
9928{
9929 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9930#ifdef DBGFTRACE_ENABLED
9931 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9932 u8TrapNo, enmType, uErrCode, uCr2);
9933#endif
9934
9935 uint32_t fFlags;
9936 switch (enmType)
9937 {
9938 case TRPM_HARDWARE_INT:
9939 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9940 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9941 uErrCode = uCr2 = 0;
9942 break;
9943
9944 case TRPM_SOFTWARE_INT:
9945 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9946 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9947 uErrCode = uCr2 = 0;
9948 break;
9949
9950 case TRPM_TRAP:
9951 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9952 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9953 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9954 if (u8TrapNo == X86_XCPT_PF)
9955 fFlags |= IEM_XCPT_FLAGS_CR2;
9956 switch (u8TrapNo)
9957 {
9958 case X86_XCPT_DF:
9959 case X86_XCPT_TS:
9960 case X86_XCPT_NP:
9961 case X86_XCPT_SS:
9962 case X86_XCPT_PF:
9963 case X86_XCPT_AC:
9964 case X86_XCPT_GP:
9965 fFlags |= IEM_XCPT_FLAGS_ERR;
9966 break;
9967 }
9968 break;
9969
9970 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9971 }
9972
9973 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9974
9975 if (pVCpu->iem.s.cActiveMappings > 0)
9976 iemMemRollback(pVCpu);
9977
9978 return rcStrict;
9979}
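/*
 * Two minimal usage sketches for IEMInjectTrap, matching the flag selection in
 * the switch above; the vector, error code and CR2 values are illustrative:
 *
 *      // External hardware interrupt, vector 0x20; uErrCode/uCr2 are ignored
 *      // and cbInstr only matters for software interrupts.
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 *
 *      // Page fault with an error code and the faulting linear address (CR2).
 *      rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0);
 */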
9980
9981
9982/**
9983 * Injects the active TRPM event.
9984 *
9985 * @returns Strict VBox status code.
9986 * @param pVCpu The cross context virtual CPU structure.
9987 */
9988VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9989{
9990#ifndef IEM_IMPLEMENTS_TASKSWITCH
9991 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9992#else
9993 uint8_t u8TrapNo;
9994 TRPMEVENT enmType;
9995 uint32_t uErrCode;
9996 RTGCUINTPTR uCr2;
9997 uint8_t cbInstr;
9998 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9999 if (RT_FAILURE(rc))
10000 return rc;
10001
10002 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10003 * ICEBP \#DB injection as a special case. */
10004 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10005#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10006 if (rcStrict == VINF_SVM_VMEXIT)
10007 rcStrict = VINF_SUCCESS;
10008#endif
10009#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10010 if (rcStrict == VINF_VMX_VMEXIT)
10011 rcStrict = VINF_SUCCESS;
10012#endif
10013 /** @todo Are there any other codes that imply the event was successfully
10014 * delivered to the guest? See @bugref{6607}. */
10015 if ( rcStrict == VINF_SUCCESS
10016 || rcStrict == VINF_IEM_RAISED_XCPT)
10017 TRPMResetTrap(pVCpu);
10018
10019 return rcStrict;
10020#endif
10021}
10022
10023
10024VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10025{
10026 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10027 return VERR_NOT_IMPLEMENTED;
10028}
10029
10030
10031VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10032{
10033 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10034 return VERR_NOT_IMPLEMENTED;
10035}
10036
10037
10038/**
10039 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10040 *
10041 * This API ASSUMES that the caller has already verified that the guest code is
10042 * allowed to access the I/O port. (The I/O port is in the DX register in the
10043 * guest state.)
10044 *
10045 * @returns Strict VBox status code.
10046 * @param pVCpu The cross context virtual CPU structure.
10047 * @param cbValue The size of the I/O port access (1, 2, or 4).
10048 * @param enmAddrMode The addressing mode.
10049 * @param fRepPrefix Indicates whether a repeat prefix is used
10050 * (doesn't matter which for this instruction).
10051 * @param cbInstr The instruction length in bytes.
10052 * @param   iEffSeg             The effective segment register number.
10053 * @param fIoChecked Whether the access to the I/O port has been
10054 * checked or not. It's typically checked in the
10055 * HM scenario.
10056 */
10057VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10058 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10059{
10060 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10061 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10062
10063 /*
10064 * State init.
10065 */
10066 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10067
10068 /*
10069 * Switch orgy for getting to the right handler.
10070 */
10071 VBOXSTRICTRC rcStrict;
10072 if (fRepPrefix)
10073 {
10074 switch (enmAddrMode)
10075 {
10076 case IEMMODE_16BIT:
10077 switch (cbValue)
10078 {
10079 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10080 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10081 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10082 default:
10083 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10084 }
10085 break;
10086
10087 case IEMMODE_32BIT:
10088 switch (cbValue)
10089 {
10090 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10091 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10092 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10093 default:
10094 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10095 }
10096 break;
10097
10098 case IEMMODE_64BIT:
10099 switch (cbValue)
10100 {
10101 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10102 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10103 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10104 default:
10105 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10106 }
10107 break;
10108
10109 default:
10110 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10111 }
10112 }
10113 else
10114 {
10115 switch (enmAddrMode)
10116 {
10117 case IEMMODE_16BIT:
10118 switch (cbValue)
10119 {
10120 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10121 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10122 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10123 default:
10124 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10125 }
10126 break;
10127
10128 case IEMMODE_32BIT:
10129 switch (cbValue)
10130 {
10131 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10132 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10133 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10134 default:
10135 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10136 }
10137 break;
10138
10139 case IEMMODE_64BIT:
10140 switch (cbValue)
10141 {
10142 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10143 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10144 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10145 default:
10146 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10147 }
10148 break;
10149
10150 default:
10151 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10152 }
10153 }
10154
10155 if (pVCpu->iem.s.cActiveMappings)
10156 iemMemRollback(pVCpu);
10157
10158 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10159}
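/*
 * A minimal usage sketch for IEMExecStringIoWrite, e.g. from an HM string I/O
 * exit handler; cbInstr comes from the caller's exit information and the port
 * number is taken from guest DX, so it is not passed here:
 *
 *      // REP OUTSB with 32-bit addressing and DS as the effective segment;
 *      // cbValue=1, fRepPrefix=true, fIoChecked=true (access already verified).
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                   true, cbInstr, X86_SREG_DS, true);
 */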
10160
10161
10162/**
10163 * Interface for HM and EM for executing string I/O IN (read) instructions.
10164 *
10165 * This API ASSUMES that the caller has already verified that the guest code is
10166 * allowed to access the I/O port. (The I/O port is in the DX register in the
10167 * guest state.)
10168 *
10169 * @returns Strict VBox status code.
10170 * @param pVCpu The cross context virtual CPU structure.
10171 * @param cbValue The size of the I/O port access (1, 2, or 4).
10172 * @param enmAddrMode The addressing mode.
10173 * @param fRepPrefix Indicates whether a repeat prefix is used
10174 * (doesn't matter which for this instruction).
10175 * @param cbInstr The instruction length in bytes.
10176 * @param fIoChecked Whether the access to the I/O port has been
10177 * checked or not. It's typically checked in the
10178 * HM scenario.
10179 */
10180VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10181 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10182{
10183 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10184
10185 /*
10186 * State init.
10187 */
10188 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10189
10190 /*
10191 * Switch orgy for getting to the right handler.
10192 */
10193 VBOXSTRICTRC rcStrict;
10194 if (fRepPrefix)
10195 {
10196 switch (enmAddrMode)
10197 {
10198 case IEMMODE_16BIT:
10199 switch (cbValue)
10200 {
10201 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10202 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10203 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10204 default:
10205 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10206 }
10207 break;
10208
10209 case IEMMODE_32BIT:
10210 switch (cbValue)
10211 {
10212 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10213 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10214 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10215 default:
10216 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10217 }
10218 break;
10219
10220 case IEMMODE_64BIT:
10221 switch (cbValue)
10222 {
10223 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10224 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10225 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10226 default:
10227 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10228 }
10229 break;
10230
10231 default:
10232 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10233 }
10234 }
10235 else
10236 {
10237 switch (enmAddrMode)
10238 {
10239 case IEMMODE_16BIT:
10240 switch (cbValue)
10241 {
10242 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10243 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10244 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10245 default:
10246 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10247 }
10248 break;
10249
10250 case IEMMODE_32BIT:
10251 switch (cbValue)
10252 {
10253 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10254 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10255 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10256 default:
10257 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10258 }
10259 break;
10260
10261 case IEMMODE_64BIT:
10262 switch (cbValue)
10263 {
10264 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10265 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10266 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10267 default:
10268 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10269 }
10270 break;
10271
10272 default:
10273 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10274 }
10275 }
10276
10277 if ( pVCpu->iem.s.cActiveMappings == 0
10278 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10279 { /* likely */ }
10280 else
10281 {
10282 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10283 iemMemRollback(pVCpu);
10284 }
10285 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10286}
10287
10288
10289/**
10290 * Interface for rawmode to execute an OUT (write) instruction.
10291 *
10292 * @returns Strict VBox status code.
10293 * @param pVCpu The cross context virtual CPU structure.
10294 * @param cbInstr The instruction length in bytes.
10295 * @param   u16Port             The port to write to.
10296 * @param fImm Whether the port is specified using an immediate operand or
10297 * using the implicit DX register.
10298 * @param cbReg The register size.
10299 *
10300 * @remarks In ring-0 not all of the state needs to be synced in.
10301 */
10302VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10303{
10304 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10305 Assert(cbReg <= 4 && cbReg != 3);
10306
10307 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10308 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10309 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10310 Assert(!pVCpu->iem.s.cActiveMappings);
10311 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10312}
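/*
 * A minimal usage sketch for IEMExecDecodedOut; the values are illustrative
 * and cbReg must be 1, 2 or 4:
 *
 *      // OUT 0x80, AL (2-byte instruction with an immediate port operand).
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2, 0x80, true, 1);
 *      // args: cbInstr=2, u16Port=0x80, fImm=true, cbReg=1.
 */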
10313
10314
10315/**
10316 * Interface for rawmode to execute an IN (read) instruction.
10317 *
10318 * @returns Strict VBox status code.
10319 * @param pVCpu The cross context virtual CPU structure.
10320 * @param cbInstr The instruction length in bytes.
10321 * @param u16Port The port to read.
10322 * @param fImm Whether the port is specified using an immediate operand or
10323 *                              using the implicit DX register.
10324 * @param cbReg The register size.
10325 */
10326VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10327{
10328 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10329 Assert(cbReg <= 4 && cbReg != 3);
10330
10331 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10332 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10333 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10334 Assert(!pVCpu->iem.s.cActiveMappings);
10335 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10336}
10337
10338
10339/**
10340 * Interface for HM and EM to write to a CRx register.
10341 *
10342 * @returns Strict VBox status code.
10343 * @param pVCpu The cross context virtual CPU structure.
10344 * @param cbInstr The instruction length in bytes.
10345 * @param iCrReg The control register number (destination).
10346 * @param iGReg The general purpose register number (source).
10347 *
10348 * @remarks In ring-0 not all of the state needs to be synced in.
10349 */
10350VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10351{
10352 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10353 Assert(iCrReg < 16);
10354 Assert(iGReg < 16);
10355
10356 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10357 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10358 Assert(!pVCpu->iem.s.cActiveMappings);
10359 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10360}
10361
10362
10363/**
10364 * Interface for HM and EM to read from a CRx register.
10365 *
10366 * @returns Strict VBox status code.
10367 * @param pVCpu The cross context virtual CPU structure.
10368 * @param cbInstr The instruction length in bytes.
10369 * @param iGReg The general purpose register number (destination).
10370 * @param iCrReg The control register number (source).
10371 *
10372 * @remarks In ring-0 not all of the state needs to be synced in.
10373 */
10374VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10375{
10376 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10377 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10378 | CPUMCTX_EXTRN_APIC_TPR);
10379 Assert(iCrReg < 16);
10380 Assert(iGReg < 16);
10381
10382 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10383 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10384 Assert(!pVCpu->iem.s.cActiveMappings);
10385 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10386}
10387
10388
10389/**
10390 * Interface for HM and EM to write to a DRx register.
10391 *
10392 * @returns Strict VBox status code.
10393 * @param pVCpu The cross context virtual CPU structure.
10394 * @param cbInstr The instruction length in bytes.
10395 * @param iDrReg The debug register number (destination).
10396 * @param iGReg The general purpose register number (source).
10397 *
10398 * @remarks In ring-0 not all of the state needs to be synced in.
10399 */
10400VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10401{
10402 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10403 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10404 Assert(iDrReg < 8);
10405 Assert(iGReg < 16);
10406
10407 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10408 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10409 Assert(!pVCpu->iem.s.cActiveMappings);
10410 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10411}
10412
10413
10414/**
10415 * Interface for HM and EM to read from a DRx register.
10416 *
10417 * @returns Strict VBox status code.
10418 * @param pVCpu The cross context virtual CPU structure.
10419 * @param cbInstr The instruction length in bytes.
10420 * @param iGReg The general purpose register number (destination).
10421 * @param iDrReg The debug register number (source).
10422 *
10423 * @remarks In ring-0 not all of the state needs to be synced in.
10424 */
10425VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10426{
10427 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10428 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10429 Assert(iDrReg < 8);
10430 Assert(iGReg < 16);
10431
10432 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10433 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10434 Assert(!pVCpu->iem.s.cActiveMappings);
10435 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10436}
10437
10438
10439/**
10440 * Interface for HM and EM to clear the CR0[TS] bit.
10441 *
10442 * @returns Strict VBox status code.
10443 * @param pVCpu The cross context virtual CPU structure.
10444 * @param cbInstr The instruction length in bytes.
10445 *
10446 * @remarks In ring-0 not all of the state needs to be synced in.
10447 */
10448VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10449{
10450 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10451
10452 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10453 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10454 Assert(!pVCpu->iem.s.cActiveMappings);
10455 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10456}
10457
10458
10459/**
10460 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10461 *
10462 * @returns Strict VBox status code.
10463 * @param pVCpu The cross context virtual CPU structure.
10464 * @param cbInstr The instruction length in bytes.
10465 * @param uValue The value to load into CR0.
10466 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10467 * memory operand. Otherwise pass NIL_RTGCPTR.
10468 *
10469 * @remarks In ring-0 not all of the state needs to be synced in.
10470 */
10471VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10472{
10473 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10474
10475 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10476 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10477 Assert(!pVCpu->iem.s.cActiveMappings);
10478 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10479}
10480
10481
10482/**
10483 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10484 *
10485 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10486 *
10487 * @returns Strict VBox status code.
10488 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10489 * @param cbInstr The instruction length in bytes.
10490 * @remarks In ring-0 not all of the state needs to be synced in.
10491 * @thread EMT(pVCpu)
10492 */
10493VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10494{
10495 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10496
10497 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10498 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10499 Assert(!pVCpu->iem.s.cActiveMappings);
10500 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10501}
10502
10503
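/*
 * Minimal caller sketch for IEMExecDecodedXsetbv().  The concrete values are
 * assumptions; the point is that ECX (XCR index) and EDX:EAX (new value) must
 * already be in the guest context of the calling EMT, as documented above.
 *
 *      // Guest executed XSETBV with ECX and EDX:EAX already set up.
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, 3); // cbInstr: XSETBV is 0F 01 D1.
 */

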
10504/**
10505 * Interface for HM and EM to emulate the WBINVD instruction.
10506 *
10507 * @returns Strict VBox status code.
10508 * @param pVCpu The cross context virtual CPU structure.
10509 * @param cbInstr The instruction length in bytes.
10510 *
10511 * @remarks In ring-0 not all of the state needs to be synced in.
10512 */
10513VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10514{
10515 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10516
10517 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10518 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10519 Assert(!pVCpu->iem.s.cActiveMappings);
10520 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10521}
10522
10523
10524/**
10525 * Interface for HM and EM to emulate the INVD instruction.
10526 *
10527 * @returns Strict VBox status code.
10528 * @param pVCpu The cross context virtual CPU structure.
10529 * @param cbInstr The instruction length in bytes.
10530 *
10531 * @remarks In ring-0 not all of the state needs to be synced in.
10532 */
10533VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10534{
10535 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10536
10537 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10538 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10539 Assert(!pVCpu->iem.s.cActiveMappings);
10540 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10541}
10542
10543
10544/**
10545 * Interface for HM and EM to emulate the INVLPG instruction.
10546 *
10547 * @returns Strict VBox status code.
10548 * @retval VINF_PGM_SYNC_CR3
10549 *
10550 * @param pVCpu The cross context virtual CPU structure.
10551 * @param cbInstr The instruction length in bytes.
10552 * @param GCPtrPage The effective address of the page to invalidate.
10553 *
10554 * @remarks In ring-0 not all of the state needs to be synced in.
10555 */
10556VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10557{
10558 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10559
10560 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10561 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10562 Assert(!pVCpu->iem.s.cActiveMappings);
10563 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10564}
10565
10566
10567/**
10568 * Interface for HM and EM to emulate the INVPCID instruction.
10569 *
10570 * @returns Strict VBox status code.
10571 * @retval VINF_PGM_SYNC_CR3
10572 *
10573 * @param pVCpu The cross context virtual CPU structure.
10574 * @param cbInstr The instruction length in bytes.
10575 * @param iEffSeg The effective segment register.
10576 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10577 * @param uType The invalidation type.
10578 *
10579 * @remarks In ring-0 not all of the state needs to be synced in.
10580 */
10581VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10582 uint64_t uType)
10583{
10584 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10585
10586 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10588 Assert(!pVCpu->iem.s.cActiveMappings);
10589 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10590}
10591
10592
10593/**
10594 * Interface for HM and EM to emulate the CPUID instruction.
10595 *
10596 * @returns Strict VBox status code.
10597 *
10598 * @param pVCpu The cross context virtual CPU structure.
10599 * @param cbInstr The instruction length in bytes.
10600 *
10601 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10602 */
10603VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10604{
10605 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10606 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10607
10608 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10609 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10610 Assert(!pVCpu->iem.s.cActiveMappings);
10611 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10612}
10613
10614
10615/**
10616 * Interface for HM and EM to emulate the RDPMC instruction.
10617 *
10618 * @returns Strict VBox status code.
10619 *
10620 * @param pVCpu The cross context virtual CPU structure.
10621 * @param cbInstr The instruction length in bytes.
10622 *
10623 * @remarks Not all of the state needs to be synced in.
10624 */
10625VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10626{
10627 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10628 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10629
10630 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10631 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10632 Assert(!pVCpu->iem.s.cActiveMappings);
10633 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10634}
10635
10636
10637/**
10638 * Interface for HM and EM to emulate the RDTSC instruction.
10639 *
10640 * @returns Strict VBox status code.
10641 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10642 *
10643 * @param pVCpu The cross context virtual CPU structure.
10644 * @param cbInstr The instruction length in bytes.
10645 *
10646 * @remarks Not all of the state needs to be synced in.
10647 */
10648VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10649{
10650 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10651 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10652
10653 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10654 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10655 Assert(!pVCpu->iem.s.cActiveMappings);
10656 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10657}
10658
10659
10660/**
10661 * Interface for HM and EM to emulate the RDTSCP instruction.
10662 *
10663 * @returns Strict VBox status code.
10664 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10665 *
10666 * @param pVCpu The cross context virtual CPU structure.
10667 * @param cbInstr The instruction length in bytes.
10668 *
10669 * @remarks Not all of the state needs to be synced in. Recommended
10670 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10671 */
10672VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10673{
10674 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10675 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10676
10677 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10678 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10679 Assert(!pVCpu->iem.s.cActiveMappings);
10680 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10681}
10682
10683
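/*
 * Caller sketch for the CPUMCTX_EXTRN_TSC_AUX recommendation above.  The
 * state-import helper name is hypothetical; the extern-flag set mirrors the
 * context assertion inside the function.
 *
 *      exampleImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
 *                                     | CPUMCTX_EXTRN_CR4
 *                                     | CPUMCTX_EXTRN_TSC_AUX); // avoids an extra TSC_AUX fetch
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, 3);  // cbInstr: RDTSCP is 0F 01 F9.
 */

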
10684/**
10685 * Interface for HM and EM to emulate the RDMSR instruction.
10686 *
10687 * @returns Strict VBox status code.
10688 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10689 *
10690 * @param pVCpu The cross context virtual CPU structure.
10691 * @param cbInstr The instruction length in bytes.
10692 *
10693 * @remarks Not all of the state needs to be synced in. Requires RCX and
10694 * (currently) all MSRs.
10695 */
10696VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10697{
10698 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10699 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10700
10701 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
10706
10707
10708/**
10709 * Interface for HM and EM to emulate the WRMSR instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10713 *
10714 * @param pVCpu The cross context virtual CPU structure.
10715 * @param cbInstr The instruction length in bytes.
10716 *
10717 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10718 * and (currently) all MSRs.
10719 */
10720VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10721{
10722 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10723 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10724 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10725
10726 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10727 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10728 Assert(!pVCpu->iem.s.cActiveMappings);
10729 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10730}
10731
10732
10733/**
10734 * Interface for HM and EM to emulate the MONITOR instruction.
10735 *
10736 * @returns Strict VBox status code.
10737 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10738 *
10739 * @param pVCpu The cross context virtual CPU structure.
10740 * @param cbInstr The instruction length in bytes.
10741 *
10742 * @remarks Not all of the state needs to be synced in.
10743 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10744 * are used.
10745 */
10746VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10747{
10748 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10749 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10750
10751 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10752 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10753 Assert(!pVCpu->iem.s.cActiveMappings);
10754 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10755}
10756
10757
10758/**
10759 * Interface for HM and EM to emulate the MWAIT instruction.
10760 *
10761 * @returns Strict VBox status code.
10762 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10763 *
10764 * @param pVCpu The cross context virtual CPU structure.
10765 * @param cbInstr The instruction length in bytes.
10766 *
10767 * @remarks Not all of the state needs to be synced in.
10768 */
10769VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10770{
10771 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10772 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10773
10774 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10775 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10776 Assert(!pVCpu->iem.s.cActiveMappings);
10777 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10778}
10779
10780
10781/**
10782 * Interface for HM and EM to emulate the HLT instruction.
10783 *
10784 * @returns Strict VBox status code.
10785 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10786 *
10787 * @param pVCpu The cross context virtual CPU structure.
10788 * @param cbInstr The instruction length in bytes.
10789 *
10790 * @remarks Not all of the state needs to be synced in.
10791 */
10792VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10793{
10794 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10795
10796 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10797 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10798 Assert(!pVCpu->iem.s.cActiveMappings);
10799 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10800}
10801
10802
10803/**
10804 * Checks if IEM is in the process of delivering an event (interrupt or
10805 * exception).
10806 *
10807 * @returns true if we're in the process of raising an interrupt or exception,
10808 * false otherwise.
10809 * @param pVCpu The cross context virtual CPU structure.
10810 * @param puVector Where to store the vector associated with the
10811 * currently delivered event, optional.
10812 * @param pfFlags Where to store the event delivery flags (see
10813 * IEM_XCPT_FLAGS_XXX), optional.
10814 * @param puErr Where to store the error code associated with the
10815 * event, optional.
10816 * @param puCr2 Where to store the CR2 associated with the event,
10817 * optional.
10818 * @remarks The caller should check the flags to determine if the error code and
10819 * CR2 are valid for the event.
10820 */
10821VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10822{
10823 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10824 if (fRaisingXcpt)
10825 {
10826 if (puVector)
10827 *puVector = pVCpu->iem.s.uCurXcpt;
10828 if (pfFlags)
10829 *pfFlags = pVCpu->iem.s.fCurXcpt;
10830 if (puErr)
10831 *puErr = pVCpu->iem.s.uCurXcptErr;
10832 if (puCr2)
10833 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10834 }
10835 return fRaisingXcpt;
10836}
10837
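/*
 * Usage sketch for IEMGetCurrentXcpt(); everything here follows from the
 * documented contract above, while the caller context is hypothetical.
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags, uErr;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          // An event is being delivered.  Check fFlags (IEM_XCPT_FLAGS_XXX)
 *          // before trusting uErr and uCr2 for this vector.
 *      }
 */
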
10838#ifdef IN_RING3
10839
10840/**
10841 * Handles the unlikely and probably fatal merge cases.
10842 *
10843 * @returns Merged status code.
10844 * @param rcStrict Current EM status code.
10845 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10846 * with @a rcStrict.
10847 * @param iMemMap The memory mapping index. For error reporting only.
10848 * @param pVCpu The cross context virtual CPU structure of the calling
10849 * thread, for error reporting only.
10850 */
10851DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10852 unsigned iMemMap, PVMCPUCC pVCpu)
10853{
10854 if (RT_FAILURE_NP(rcStrict))
10855 return rcStrict;
10856
10857 if (RT_FAILURE_NP(rcStrictCommit))
10858 return rcStrictCommit;
10859
10860 if (rcStrict == rcStrictCommit)
10861 return rcStrictCommit;
10862
10863 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10864 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10868 return VERR_IOM_FF_STATUS_IPE;
10869}
10870
10871
10872/**
10873 * Helper for IOMR3ProcessForceFlag.
10874 *
10875 * @returns Merged status code.
10876 * @param rcStrict Current EM status code.
10877 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10878 * with @a rcStrict.
10879 * @param iMemMap The memory mapping index. For error reporting only.
10880 * @param pVCpu The cross context virtual CPU structure of the calling
10881 * thread, for error reporting only.
10882 */
10883DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10884{
10885 /* Simple. */
10886 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10887 return rcStrictCommit;
10888
10889 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10890 return rcStrict;
10891
10892 /* EM scheduling status codes. */
10893 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10894 && rcStrict <= VINF_EM_LAST))
10895 {
10896 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10897 && rcStrictCommit <= VINF_EM_LAST))
10898 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10899 }
10900
10901 /* Unlikely */
10902 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10903}
10904
10905
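/*
 * Worked examples of the merge rules above (illustrative inputs only):
 *
 *      iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RAW_TO_R3, 0, pVCpu); // -> VINF_EM_RAW_TO_R3
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,      0, pVCpu); // -> VINF_SUCCESS
 *
 * When both inputs are EM scheduling status codes, the numerically smaller
 * one wins (by EM convention the higher-priority code); everything else is
 * handed to iemR3MergeStatusSlow().
 */

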
10906/**
10907 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10908 *
10909 * @returns Merge between @a rcStrict and what the commit operation returned.
10910 * @param pVM The cross context VM structure.
10911 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10912 * @param rcStrict The status code returned by ring-0 or raw-mode.
10913 */
10914VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10915{
10916 /*
10917 * Reset the pending commit.
10918 */
10919 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10920 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10921 ("%#x %#x %#x\n",
10922 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10923 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10924
10925 /*
10926 * Commit the pending bounce buffers (usually just one).
10927 */
10928 unsigned cBufs = 0;
10929 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10930 while (iMemMap-- > 0)
10931 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10932 {
10933 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10934 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10935 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10936
10937 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10938 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10939 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10940
10941 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10942 {
10943 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10944 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10945 pbBuf,
10946 cbFirst,
10947 PGMACCESSORIGIN_IEM);
10948 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10949 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10950 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10951 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10952 }
10953
10954 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10955 {
10956 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10957 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10958 pbBuf + cbFirst,
10959 cbSecond,
10960 PGMACCESSORIGIN_IEM);
10961 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10962 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10963 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10964 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10965 }
10966 cBufs++;
10967 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10968 }
10969
10970 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10971 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10972 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10973 pVCpu->iem.s.cActiveMappings = 0;
10974 return rcStrict;
10975}
10976
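/*
 * Caller sketch: a hypothetical ring-3 force-flag check.  Only the documented
 * VMCPU_FF_IEM / IEMR3ProcessForceFlag() contract is relied upon here.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */
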
10977#endif /* IN_RING3 */
10978