VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 102621

Last change on this file since 102621 was 102585, checked in by vboxsync, 14 months ago

VMM/IEM: Refactored the IEM_MC_SET_RIP_Uxx_AND_FINISH MCs in prep for native translation. bugref:10371

1/* $Id: IEMAll.cpp 102585 2023-12-12 12:26:29Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed, as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
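/*
 * Illustrative sketch (not part of this file): how the level assignments above
 * are typically used from inside an IEM function.  The function name and the
 * messages are made-up examples; only the Log/LogFlow/Log4 macros themselves
 * are real.
 *
 * @code
 *     LogFlow(("iemExampleWorker: enter rip=%RX64\n", pVCpu->cpum.GstCtx.rip));   // Flow   : enter/exit state info.
 *     Log4(("decode %04x:%RX64: xor eax, eax\n",
 *           pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));                  // Level 4: decoded mnemonic w/ EIP.
 *     Log(("iemExampleWorker: raising #GP(0)\n"));                                // Level 1: exceptions & major events.
 * @endcode
 */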
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
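/*
 * Illustrative sketch (not part of the build): how a single DR7 slot feeds the
 * PROCESS_ONE_BP macro above.  The DR7 value is a hand-encoded example; the
 * X86_DR7_* macros are the same ones used in the function.
 *
 * @code
 *     uint32_t const fDr7 = UINT32_C(0x00000001);             // L0=1 (slot 0 enabled), R/W0=00b (instruction execution).
 *     if (fDr7 & X86_DR7_L_G(0))                              // Slot 0 enabled locally or globally?
 *         Assert(X86_DR7_GET_RW(fDr7, 0) == X86_DR7_RW_EO);   // Yes, execute-only -> fExec |= IEM_F_PENDING_BRK_INSTR.
 * @endcode
 */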
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetch opcodes the first time when starting execution.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
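/*
 * Illustrative sketch (simplified; pTlb is a hypothetical IEMTLB pointer): the
 * revision trick used above.  Entry tags are stored as (page tag | revision),
 * so bumping the revision invalidates everything in O(1); only when the
 * revision field wraps around do the stale tags have to be wiped explicitly.
 *
 * @code
 *     pTlb->uTlbRevision += IEMTLB_REVISION_INCR;        // O(1): tags stored with the old revision no longer match.
 *     if (pTlb->uTlbRevision == 0)                       // Revision field wrapped around...
 *     {
 *         pTlb->uTlbRevision = IEMTLB_REVISION_INCR;     // ...so restart it and
 *         for (unsigned i = RT_ELEMENTS(pTlb->aEntries); i-- > 0; )
 *             pTlb->aEntries[i].uTag = 0;                // ...wipe the stale tags explicitly.
 *     }
 * @endcode
 */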
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs in slow fashion following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller == pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.pbInstrBuf = NULL;
819 pVCpu->iem.s.cbInstrBufTotal = 0;
820 RT_NOREF(cbInstr);
821#else
822 RT_NOREF(pVCpu, cbInstr);
823#endif
824}
825
826
827
828#ifdef IEM_WITH_CODE_TLB
829
830/**
831 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
832 * failure and jumps.
833 *
834 * We end up here for a number of reasons:
835 * - pbInstrBuf isn't yet initialized.
836 * - Advancing beyond the buffer boundary (e.g. cross page).
837 * - Advancing beyond the CS segment limit.
838 * - Fetching from non-mappable page (e.g. MMIO).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 if (pVCpu->iem.s.pbInstrBuf != NULL)
861 {
862 if (offBuf < pVCpu->iem.s.cbInstrBuf)
863 {
864 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
865 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
866 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
867
868 cbDst -= cbCopy;
869 pvDst = (uint8_t *)pvDst + cbCopy;
870 offBuf += cbCopy;
871 pVCpu->iem.s.offInstrNextByte += offBuf;
872 }
873 }
874
875 /*
876 * Check segment limit, figuring how much we're allowed to access at this point.
877 *
878 * We will fault immediately if RIP is past the segment limit / in non-canonical
879 * territory. If we do continue, there are one or more bytes to read before we
880 * end up in trouble and we need to do that first before faulting.
881 */
882 RTGCPTR GCPtrFirst;
883 uint32_t cbMaxRead;
884 if (IEM_IS_64BIT_CODE(pVCpu))
885 {
886 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
887 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
888 { /* likely */ }
889 else
890 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
891 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
892 }
893 else
894 {
895 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
896 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
897 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
898 { /* likely */ }
899 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
900 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
901 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
902 if (cbMaxRead != 0)
903 { /* likely */ }
904 else
905 {
906 /* Overflowed because address is 0 and limit is max. */
907 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
908 cbMaxRead = X86_PAGE_SIZE;
909 }
910 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
911 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
912 if (cbMaxRead2 < cbMaxRead)
913 cbMaxRead = cbMaxRead2;
914 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
915 }
916
917 /*
918 * Get the TLB entry for this piece of code.
919 */
920 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
921 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
922 if (pTlbe->uTag == uTag)
923 {
924 /* likely when executing lots of code, otherwise unlikely */
925# ifdef VBOX_WITH_STATISTICS
926 pVCpu->iem.s.CodeTlb.cTlbHits++;
927# endif
928 }
929 else
930 {
931 pVCpu->iem.s.CodeTlb.cTlbMisses++;
932 PGMPTWALK Walk;
933 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
934 if (RT_FAILURE(rc))
935 {
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
937 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
938 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
939#endif
940 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
941 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
942 }
943
944 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
945 Assert(Walk.fSucceeded);
946 pTlbe->uTag = uTag;
947 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
948 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
949 pTlbe->GCPhys = Walk.GCPhys;
950 pTlbe->pbMappingR3 = NULL;
951 }
952
953 /*
954 * Check TLB page table level access flags.
955 */
956 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
957 {
958 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
959 {
960 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
961 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
962 }
963 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
964 {
965 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
966 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
967 }
968 }
969
970 /*
971 * Look up the physical page info if necessary.
972 */
973 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
974 { /* not necessary */ }
975 else
976 {
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
980 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
981 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
982 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
983 { /* likely */ }
984 else
985 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
986 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
987 | IEMTLBE_F_NO_MAPPINGR3
988 | IEMTLBE_F_PG_NO_READ
989 | IEMTLBE_F_PG_NO_WRITE
990 | IEMTLBE_F_PG_UNASSIGNED
991 | IEMTLBE_F_PG_CODE_PAGE);
992 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
993 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
994 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
995 }
996
997# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
998 /*
999 * Try do a direct read using the pbMappingR3 pointer.
1000 */
1001 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1002 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1003 {
1004 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1005 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1006 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1007 {
1008 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1009 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1010 }
1011 else
1012 {
1013 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1014 if (cbInstr + (uint32_t)cbDst <= 15)
1015 {
1016 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1017 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1018 }
1019 else
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1023 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1024 }
1025 }
1026 if (cbDst <= cbMaxRead)
1027 {
1028 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1029 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1030
1031 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1032 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1033 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1034 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1035 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1036 return;
1037 }
1038 pVCpu->iem.s.pbInstrBuf = NULL;
1039
1040 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1041 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1042 }
1043# else
1044# error "refactor as needed"
1045 /*
1046 * If there is no special read handling, we can read a bit more and
1047 * put it in the prefetch buffer.
1048 */
1049 if ( cbDst < cbMaxRead
1050 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1053 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1062 }
1063 else
1064 {
1065 Log((RT_SUCCESS(rcStrict)
1066 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1067 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1068 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1069 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1070 }
1071 }
1072# endif
1073 /*
1074 * Special read handling, so only read exactly what's needed.
1075 * This is a highly unlikely scenario.
1076 */
1077 else
1078 {
1079 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1080
1081 /* Check instruction length. */
1082 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1083 if (RT_LIKELY(cbInstr + cbDst <= 15))
1084 { /* likely */ }
1085 else
1086 {
1087 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1089 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1090 }
1091
1092 /* Do the reading. */
1093 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1094 if (cbToRead > 0)
1095 {
1096 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1097 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1099 { /* likely */ }
1100 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1101 {
1102 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1103 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1105 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1106 }
1107 else
1108 {
1109 Log((RT_SUCCESS(rcStrict)
1110 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1111 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1112 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1113 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1114 }
1115 }
1116
1117 /* Update the state and probably return. */
1118 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1119 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1120 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1121
1122 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1123 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1124 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1125 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1126 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1127 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1128 pVCpu->iem.s.pbInstrBuf = NULL;
1129 if (cbToRead == cbDst)
1130 return;
1131 }
1132
1133 /*
1134 * More to read, loop.
1135 */
1136 cbDst -= cbMaxRead;
1137 pvDst = (uint8_t *)pvDst + cbMaxRead;
1138 }
1139# else /* !IN_RING3 */
1140 RT_NOREF(pvDst, cbDst);
1141 if (pvDst || cbDst)
1142 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1143# endif /* !IN_RING3 */
1144}
1145
1146#else /* !IEM_WITH_CODE_TLB */
1147
1148/**
1149 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1150 * exception if it fails.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param cbMin The minimum number of bytes relative to offOpcode
1156 * that must be read.
1157 */
1158VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1159{
1160 /*
1161 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1162 *
1163 * First translate CS:rIP to a physical address.
1164 */
1165 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1166 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1167 uint8_t const cbLeft = cbOpcode - offOpcode;
1168 Assert(cbLeft < cbMin);
1169 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1170
1171 uint32_t cbToTryRead;
1172 RTGCPTR GCPtrNext;
1173 if (IEM_IS_64BIT_CODE(pVCpu))
1174 {
1175 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1176 if (!IEM_IS_CANONICAL(GCPtrNext))
1177 return iemRaiseGeneralProtectionFault0(pVCpu);
1178 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1179 }
1180 else
1181 {
1182 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1183 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1184 GCPtrNext32 += cbOpcode;
1185 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1186 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1187 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1188 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1189 if (!cbToTryRead) /* overflowed */
1190 {
1191 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1192 cbToTryRead = UINT32_MAX;
1193 /** @todo check out wrapping around the code segment. */
1194 }
1195 if (cbToTryRead < cbMin - cbLeft)
1196 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1197 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1198
1199 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1200 if (cbToTryRead > cbLeftOnPage)
1201 cbToTryRead = cbLeftOnPage;
1202 }
1203
1204 /* Restrict to opcode buffer space.
1205
1206 We're making ASSUMPTIONS here based on work done previously in
1207 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1208 be fetched in case of an instruction crossing two pages. */
1209 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1210 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1211 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1212 { /* likely */ }
1213 else
1214 {
1215 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1216 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1217 return iemRaiseGeneralProtectionFault0(pVCpu);
1218 }
1219
1220 PGMPTWALK Walk;
1221 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1222 if (RT_FAILURE(rc))
1223 {
1224 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1225#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1226 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1227 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1228#endif
1229 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1230 }
1231 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1232 {
1233 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1234#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1235 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1236 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1237#endif
1238 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1239 }
1240 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1244 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1245 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1246#endif
1247 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1248 }
1249 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1250 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255 /*
1256 * Read the bytes at this address.
1257 *
1258 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1259 * and since PATM should only patch the start of an instruction there
1260 * should be no need to check again here.
1261 */
1262 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1263 {
1264 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1265 cbToTryRead, PGMACCESSORIGIN_IEM);
1266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1267 { /* likely */ }
1268 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1269 {
1270 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1271 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1272 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1273 }
1274 else
1275 {
1276 Log((RT_SUCCESS(rcStrict)
1277 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1278 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1279 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1280 return rcStrict;
1281 }
1282 }
1283 else
1284 {
1285 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1286 if (RT_SUCCESS(rc))
1287 { /* likely */ }
1288 else
1289 {
1290 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1291 return rc;
1292 }
1293 }
1294 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1295 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1296
1297 return VINF_SUCCESS;
1298}
1299
1300#endif /* !IEM_WITH_CODE_TLB */
1301#ifndef IEM_WITH_SETJMP
1302
1303/**
1304 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1305 *
1306 * @returns Strict VBox status code.
1307 * @param pVCpu The cross context virtual CPU structure of the
1308 * calling thread.
1309 * @param pb Where to return the opcode byte.
1310 */
1311VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1312{
1313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1314 if (rcStrict == VINF_SUCCESS)
1315 {
1316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1317 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1318 pVCpu->iem.s.offOpcode = offOpcode + 1;
1319 }
1320 else
1321 *pb = 0;
1322 return rcStrict;
1323}
1324
1325#else /* IEM_WITH_SETJMP */
1326
1327/**
1328 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1329 *
1330 * @returns The opcode byte.
1331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1332 */
1333uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1334{
1335# ifdef IEM_WITH_CODE_TLB
1336 uint8_t u8;
1337 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1338 return u8;
1339# else
1340 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1341 if (rcStrict == VINF_SUCCESS)
1342 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1343 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1344# endif
1345}
1346
1347#endif /* IEM_WITH_SETJMP */
1348
1349#ifndef IEM_WITH_SETJMP
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu16 Where to return the opcode word.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu16 = (int8_t)u8;
1364 return rcStrict;
1365}
1366
1367
1368/**
1369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1370 *
1371 * @returns Strict VBox status code.
1372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1373 * @param pu32 Where to return the opcode dword.
1374 */
1375VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1376{
1377 uint8_t u8;
1378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1379 if (rcStrict == VINF_SUCCESS)
1380 *pu32 = (int8_t)u8;
1381 return rcStrict;
1382}
1383
1384
1385/**
1386 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1387 *
1388 * @returns Strict VBox status code.
1389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1390 * @param pu64 Where to return the opcode qword.
1391 */
1392VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1393{
1394 uint8_t u8;
1395 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1396 if (rcStrict == VINF_SUCCESS)
1397 *pu64 = (int8_t)u8;
1398 return rcStrict;
1399}
1400
1401#endif /* !IEM_WITH_SETJMP */
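/*
 * Worked example (not from the source): the (int8_t) cast in the S8SxU16/U32/U64
 * helpers above is what performs the sign extension.  For an opcode byte of 0x80:
 *
 * @code
 *     uint8_t const  u8  = 0x80;
 *     uint16_t const u16 = (int8_t)u8;    // 0xFF80
 *     uint32_t const u32 = (int8_t)u8;    // 0xFFFFFF80
 *     uint64_t const u64 = (int8_t)u8;    // 0xFFFFFFFFFFFFFF80
 * @endcode
 */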
1402
1403
1404#ifndef IEM_WITH_SETJMP
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 * @param pu16 Where to return the opcode word.
1412 */
1413VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1414{
1415 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1416 if (rcStrict == VINF_SUCCESS)
1417 {
1418 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1419# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1420 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1421# else
1422 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1423# endif
1424 pVCpu->iem.s.offOpcode = offOpcode + 2;
1425 }
1426 else
1427 *pu16 = 0;
1428 return rcStrict;
1429}
1430
1431#else /* IEM_WITH_SETJMP */
1432
1433/**
1434 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1435 *
1436 * @returns The opcode word.
1437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1438 */
1439uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1440{
1441# ifdef IEM_WITH_CODE_TLB
1442 uint16_t u16;
1443 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1444 return u16;
1445# else
1446 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1447 if (rcStrict == VINF_SUCCESS)
1448 {
1449 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1450 pVCpu->iem.s.offOpcode += 2;
1451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1452 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1453# else
1454 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1455# endif
1456 }
1457 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1458# endif
1459}
1460
1461#endif /* IEM_WITH_SETJMP */
1462
1463#ifndef IEM_WITH_SETJMP
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu32 Where to return the opcode double word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu32 = 0;
1483 return rcStrict;
1484}
1485
1486
1487/**
1488 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1489 *
1490 * @returns Strict VBox status code.
1491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1492 * @param pu64 Where to return the opcode quad word.
1493 */
1494VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1495{
1496 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1497 if (rcStrict == VINF_SUCCESS)
1498 {
1499 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1500 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1501 pVCpu->iem.s.offOpcode = offOpcode + 2;
1502 }
1503 else
1504 *pu64 = 0;
1505 return rcStrict;
1506}
1507
1508#endif /* !IEM_WITH_SETJMP */
1509
1510#ifndef IEM_WITH_SETJMP
1511
1512/**
1513 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1514 *
1515 * @returns Strict VBox status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pu32 Where to return the opcode dword.
1518 */
1519VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1520{
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1526 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1527# else
1528 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1529 pVCpu->iem.s.abOpcode[offOpcode + 1],
1530 pVCpu->iem.s.abOpcode[offOpcode + 2],
1531 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1532# endif
1533 pVCpu->iem.s.offOpcode = offOpcode + 4;
1534 }
1535 else
1536 *pu32 = 0;
1537 return rcStrict;
1538}
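/*
 * Illustrative note (made-up byte values): RT_MAKE_U32_FROM_U8 assembles its
 * arguments least significant byte first, matching the little-endian encoding
 * of the x86 immediates and displacements fetched above.
 *
 * @code
 *     // abOpcode[off..off+3] = 0x78, 0x56, 0x34, 0x12
 *     uint32_t const u32 = RT_MAKE_U32_FROM_U8(0x78, 0x56, 0x34, 0x12);   // = 0x12345678
 * @endcode
 */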
1539
1540#else /* IEM_WITH_SETJMP */
1541
1542/**
1543 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1544 *
1545 * @returns The opcode dword.
1546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1547 */
1548uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1549{
1550# ifdef IEM_WITH_CODE_TLB
1551 uint32_t u32;
1552 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1553 return u32;
1554# else
1555 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1556 if (rcStrict == VINF_SUCCESS)
1557 {
1558 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1559 pVCpu->iem.s.offOpcode = offOpcode + 4;
1560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1561 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1562# else
1563 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1564 pVCpu->iem.s.abOpcode[offOpcode + 1],
1565 pVCpu->iem.s.abOpcode[offOpcode + 2],
1566 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1567# endif
1568 }
1569 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1570# endif
1571}
1572
1573#endif /* IEM_WITH_SETJMP */
1574
1575#ifndef IEM_WITH_SETJMP
1576
1577/**
1578 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1582 * @param pu64 Where to return the opcode dword, zero extended to 64 bits.
1583 */
1584VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1585{
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1591 pVCpu->iem.s.abOpcode[offOpcode + 1],
1592 pVCpu->iem.s.abOpcode[offOpcode + 2],
1593 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1594 pVCpu->iem.s.offOpcode = offOpcode + 4;
1595 }
1596 else
1597 *pu64 = 0;
1598 return rcStrict;
1599}
1600
1601
1602/**
1603 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1607 * @param pu64 Where to return the opcode dword, sign extended to 64 bits.
1608 */
1609VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1610{
1611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1612 if (rcStrict == VINF_SUCCESS)
1613 {
1614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1615 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1616 pVCpu->iem.s.abOpcode[offOpcode + 1],
1617 pVCpu->iem.s.abOpcode[offOpcode + 2],
1618 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1619 pVCpu->iem.s.offOpcode = offOpcode + 4;
1620 }
1621 else
1622 *pu64 = 0;
1623 return rcStrict;
1624}
1625
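/* Worked example for the sign extension above (illustrative only): opcode
   bytes fc ff ff ff give (int32_t)0xfffffffc == -4, so *pu64 becomes
   UINT64_C(0xfffffffffffffffc), whereas bytes 04 00 00 00 simply give
   UINT64_C(0x0000000000000004). */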
1626#endif /* !IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param pu64 Where to return the opcode qword.
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1638{
1639 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1640 if (rcStrict == VINF_SUCCESS)
1641 {
1642 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1644 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1645# else
1646 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1647 pVCpu->iem.s.abOpcode[offOpcode + 1],
1648 pVCpu->iem.s.abOpcode[offOpcode + 2],
1649 pVCpu->iem.s.abOpcode[offOpcode + 3],
1650 pVCpu->iem.s.abOpcode[offOpcode + 4],
1651 pVCpu->iem.s.abOpcode[offOpcode + 5],
1652 pVCpu->iem.s.abOpcode[offOpcode + 6],
1653 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1654# endif
1655 pVCpu->iem.s.offOpcode = offOpcode + 8;
1656 }
1657 else
1658 *pu64 = 0;
1659 return rcStrict;
1660}
1661
1662#else /* IEM_WITH_SETJMP */
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1666 *
1667 * @returns The opcode qword.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 */
1670uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1671{
1672# ifdef IEM_WITH_CODE_TLB
1673 uint64_t u64;
1674 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1675 return u64;
1676# else
1677 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1678 if (rcStrict == VINF_SUCCESS)
1679 {
1680 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1681 pVCpu->iem.s.offOpcode = offOpcode + 8;
1682# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1683 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1684# else
1685 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1686 pVCpu->iem.s.abOpcode[offOpcode + 1],
1687 pVCpu->iem.s.abOpcode[offOpcode + 2],
1688 pVCpu->iem.s.abOpcode[offOpcode + 3],
1689 pVCpu->iem.s.abOpcode[offOpcode + 4],
1690 pVCpu->iem.s.abOpcode[offOpcode + 5],
1691 pVCpu->iem.s.abOpcode[offOpcode + 6],
1692 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1693# endif
1694 }
1695 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1696# endif
1697}
1698
1699#endif /* IEM_WITH_SETJMP */
1700
1701
1702
1703/** @name Misc Worker Functions.
1704 * @{
1705 */
1706
1707/**
1708 * Gets the exception class for the specified exception vector.
1709 *
1710 * @returns The class of the specified exception.
1711 * @param uVector The exception vector.
1712 */
1713static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1714{
1715 Assert(uVector <= X86_XCPT_LAST);
1716 switch (uVector)
1717 {
1718 case X86_XCPT_DE:
1719 case X86_XCPT_TS:
1720 case X86_XCPT_NP:
1721 case X86_XCPT_SS:
1722 case X86_XCPT_GP:
1723 case X86_XCPT_SX: /* AMD only */
1724 return IEMXCPTCLASS_CONTRIBUTORY;
1725
1726 case X86_XCPT_PF:
1727 case X86_XCPT_VE: /* Intel only */
1728 return IEMXCPTCLASS_PAGE_FAULT;
1729
1730 case X86_XCPT_DF:
1731 return IEMXCPTCLASS_DOUBLE_FAULT;
1732 }
1733 return IEMXCPTCLASS_BENIGN;
1734}
1735
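/* Note: these classes combine according to the usual IA-32 double fault rules
   (see the #DF description in the Intel/AMD manuals): contributory followed by
   contributory, and page fault followed by page fault or contributory, escalate
   to #DF, while a fault raised during #DF delivery escalates to a triple fault;
   everything else, in particular anything involving a benign exception, is
   delivered normally.  IEMEvaluateRecursiveXcpt below builds on this
   classification. */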
1736
1737/**
1738 * Evaluates how to handle an exception caused during delivery of another event
1739 * (exception / interrupt).
1740 *
1741 * @returns How to handle the recursive exception.
1742 * @param pVCpu The cross context virtual CPU structure of the
1743 * calling thread.
1744 * @param fPrevFlags The flags of the previous event.
1745 * @param uPrevVector The vector of the previous event.
1746 * @param fCurFlags The flags of the current exception.
1747 * @param uCurVector The vector of the current exception.
1748 * @param pfXcptRaiseInfo Where to store additional information about the
1749 * exception condition. Optional.
1750 */
1751VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1752 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1753{
1754 /*
1755 * Only CPU exceptions can be raised while delivering other events; software interrupt
1756 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1757 */
1758 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1759 Assert(pVCpu); RT_NOREF(pVCpu);
1760 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1761
1762 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1763 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1764 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1765 {
1766 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1767 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1768 {
1769 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1770 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1771 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1772 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1773 {
1774 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1775 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1776 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1777 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1778 uCurVector, pVCpu->cpum.GstCtx.cr2));
1779 }
1780 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1781 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1782 {
1783 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1784 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1785 }
1786 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1787 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1788 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1789 {
1790 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1791 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1792 }
1793 }
1794 else
1795 {
1796 if (uPrevVector == X86_XCPT_NMI)
1797 {
1798 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1799 if (uCurVector == X86_XCPT_PF)
1800 {
1801 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1802 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1803 }
1804 }
1805 else if ( uPrevVector == X86_XCPT_AC
1806 && uCurVector == X86_XCPT_AC)
1807 {
1808 enmRaise = IEMXCPTRAISE_CPU_HANG;
1809 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1810 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1811 }
1812 }
1813 }
1814 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1815 {
1816 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1817 if (uCurVector == X86_XCPT_PF)
1818 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1819 }
1820 else
1821 {
1822 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1823 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1824 }
1825
1826 if (pfXcptRaiseInfo)
1827 *pfXcptRaiseInfo = fRaiseInfo;
1828 return enmRaise;
1829}
1830
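/* A minimal usage sketch for IEMEvaluateRecursiveXcpt (hypothetical caller,
   given a pVCpu in scope; illustrative only): a #PF raised while delivering
   another #PF must be escalated to a double fault with the PF_PF info flag.

        IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
        IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                         &fRaiseInfo);
        Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
        Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_PF);
*/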
1831
1832/**
1833 * Enters the CPU shutdown state initiated by a triple fault or other
1834 * unrecoverable conditions.
1835 *
1836 * @returns Strict VBox status code.
1837 * @param pVCpu The cross context virtual CPU structure of the
1838 * calling thread.
1839 */
1840static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1841{
1842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1843 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1844
1845 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1846 {
1847 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1848 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1849 }
1850
1851 RT_NOREF(pVCpu);
1852 return VINF_EM_TRIPLE_FAULT;
1853}
1854
1855
1856/**
1857 * Validates a new SS segment.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVCpu The cross context virtual CPU structure of the
1861 * calling thread.
1862 * @param NewSS The new SS selector.
1863 * @param uCpl The CPL to load the stack for.
1864 * @param pDesc Where to return the descriptor.
1865 */
1866static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1867{
1868 /* Null selectors are not allowed (we're not called for dispatching
1869 interrupts with SS=0 in long mode). */
1870 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1871 {
1872 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1873 return iemRaiseTaskSwitchFault0(pVCpu);
1874 }
1875
1876 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1877 if ((NewSS & X86_SEL_RPL) != uCpl)
1878 {
1879 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1880 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1881 }
1882
1883 /*
1884 * Read the descriptor.
1885 */
1886 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1887 if (rcStrict != VINF_SUCCESS)
1888 return rcStrict;
1889
1890 /*
1891 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1892 */
1893 if (!pDesc->Legacy.Gen.u1DescType)
1894 {
1895 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1896 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1897 }
1898
1899 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1900 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1901 {
1902 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1903 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1904 }
1905 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1906 {
1907 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1908 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1909 }
1910
1911 /* Is it there? */
1912 /** @todo testcase: Is this checked before the canonical / limit check below? */
1913 if (!pDesc->Legacy.Gen.u1Present)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1916 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1917 }
1918
1919 return VINF_SUCCESS;
1920}
1921
1922/** @} */
1923
1924
1925/** @name Raising Exceptions.
1926 *
1927 * @{
1928 */
1929
1930
1931/**
1932 * Loads the specified stack far pointer from the TSS.
1933 *
1934 * @returns VBox strict status code.
1935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1936 * @param uCpl The CPL to load the stack for.
1937 * @param pSelSS Where to return the new stack segment.
1938 * @param puEsp Where to return the new stack pointer.
1939 */
1940static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1941{
1942 VBOXSTRICTRC rcStrict;
1943 Assert(uCpl < 4);
1944
1945 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1946 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1947 {
1948 /*
1949 * 16-bit TSS (X86TSS16).
1950 */
1951 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1952 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1953 {
1954 uint32_t off = uCpl * 4 + 2;
1955 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1956 {
1957 /** @todo check actual access pattern here. */
1958 uint32_t u32Tmp = 0; /* gcc maybe... */
1959 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1960 if (rcStrict == VINF_SUCCESS)
1961 {
1962 *puEsp = RT_LOWORD(u32Tmp);
1963 *pSelSS = RT_HIWORD(u32Tmp);
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 else
1968 {
1969 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1970 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1971 }
1972 break;
1973 }
1974
1975 /*
1976 * 32-bit TSS (X86TSS32).
1977 */
1978 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1979 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1980 {
1981 uint32_t off = uCpl * 8 + 4;
1982 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1983 {
1984/** @todo check actual access pattern here. */
1985 uint64_t u64Tmp;
1986 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1987 if (rcStrict == VINF_SUCCESS)
1988 {
1989 *puEsp = u64Tmp & UINT32_MAX;
1990 *pSelSS = (RTSEL)(u64Tmp >> 32);
1991 return VINF_SUCCESS;
1992 }
1993 }
1994 else
1995 {
1996 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1997 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1998 }
1999 break;
2000 }
2001
2002 default:
2003 AssertFailed();
2004 rcStrict = VERR_IEM_IPE_4;
2005 break;
2006 }
2007
2008 *puEsp = 0; /* make gcc happy */
2009 *pSelSS = 0; /* make gcc happy */
2010 return rcStrict;
2011}
2012
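/* A worked sketch of the TSS stack-pointer offsets used above (standard
   architectural layouts; the concrete numbers are illustrative and not
   checked at compile time here):
        16-bit TSS: sp0/ss0 start at offset 2, 4 bytes per ring, so uCpl=1
                    gives off = 1*4 + 2 = 6, i.e. sp1 at 6 and ss1 at 8.
        32-bit TSS: esp0/ss0 start at offset 4, 8 bytes per ring, so uCpl=2
                    gives off = 2*8 + 4 = 20, i.e. esp2 at 20 and ss2 at 24.
   The single 32-bit/64-bit system read then returns SP in the low part and
   SS in the word immediately following it. */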
2013
2014/**
2015 * Loads the specified stack pointer from the 64-bit TSS.
2016 *
2017 * @returns VBox strict status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param uCpl The CPL to load the stack for.
2020 * @param uIst The interrupt stack table index; 0 means select the stack by uCpl instead.
2021 * @param puRsp Where to return the new stack pointer.
2022 */
2023static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2024{
2025 Assert(uCpl < 4);
2026 Assert(uIst < 8);
2027 *puRsp = 0; /* make gcc happy */
2028
2029 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2030 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2031
2032 uint32_t off;
2033 if (uIst)
2034 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2035 else
2036 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2037 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2038 {
2039 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2040 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2041 }
2042
2043 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2044}
2045
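/* Worked example for the 64-bit TSS offsets above, assuming the architectural
   layout (rsp0 at byte offset 4, ist1 at byte offset 36): uCpl=2 with uIst=0
   yields off = 2*8 + 4 = 20 (rsp2), while uIst=4 yields
   off = (4-1)*8 + 36 = 60 (ist4). */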
2046
2047/**
2048 * Adjust the CPU state according to the exception being raised.
2049 *
2050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param u8Vector The exception that has been raised.
2052 */
2053DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2054{
2055 switch (u8Vector)
2056 {
2057 case X86_XCPT_DB:
2058 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2059 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2060 break;
2061 /** @todo Read the AMD and Intel exception reference... */
2062 }
2063}
2064
2065
2066/**
2067 * Implements exceptions and interrupts for real mode.
2068 *
2069 * @returns VBox strict status code.
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param cbInstr The number of bytes to offset rIP by in the return
2072 * address.
2073 * @param u8Vector The interrupt / exception vector number.
2074 * @param fFlags The flags.
2075 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2076 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2077 */
2078static VBOXSTRICTRC
2079iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2080 uint8_t cbInstr,
2081 uint8_t u8Vector,
2082 uint32_t fFlags,
2083 uint16_t uErr,
2084 uint64_t uCr2) RT_NOEXCEPT
2085{
2086 NOREF(uErr); NOREF(uCr2);
2087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2088
2089 /*
2090 * Read the IDT entry.
2091 */
2092 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2093 {
2094 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2096 }
2097 RTFAR16 Idte;
2098 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2099 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2100 {
2101 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2102 return rcStrict;
2103 }
2104
2105 /*
2106 * Push the stack frame.
2107 */
2108 uint8_t bUnmapInfo;
2109 uint16_t *pu16Frame;
2110 uint64_t uNewRsp;
2111 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2112 if (rcStrict != VINF_SUCCESS)
2113 return rcStrict;
2114
2115 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2116#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2117 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2118 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2119 fEfl |= UINT16_C(0xf000);
2120#endif
2121 pu16Frame[2] = (uint16_t)fEfl;
2122 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2123 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2124 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2125 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2126 return rcStrict;
2127
2128 /*
2129 * Load the vector address into cs:ip and make exception specific state
2130 * adjustments.
2131 */
2132 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2133 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2134 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2135 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2136 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2137 pVCpu->cpum.GstCtx.rip = Idte.off;
2138 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2139 IEMMISC_SET_EFL(pVCpu, fEfl);
2140
2141 /** @todo do we actually do this in real mode? */
2142 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2143 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2144
2145 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2146 so best leave them alone in case we're in a weird kind of real mode... */
2147
2148 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2149}
2150
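/* Illustration of the guest state produced above for a software INT 21h at
   CS:IP=1234h:0100h (2 byte instruction) with an IVT entry of 0060h:0800h --
   hypothetical values, shown relative to the post-push SP:
        SS:SP+4   original FLAGS (IF/TF/AC are only cleared in the live
                  EFLAGS afterwards, not in the pushed copy)
        SS:SP+2   old CS = 1234h
        SS:SP+0   return IP = 0102h (IP + cbInstr; hardware exceptions push
                  the unadvanced IP instead)
   and CS:IP = 0060h:0800h with CS.u64Base = 00600h. */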
2151
2152/**
2153 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2154 *
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 * @param pSReg Pointer to the segment register.
2157 */
2158DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2159{
2160 pSReg->Sel = 0;
2161 pSReg->ValidSel = 0;
2162 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2163 {
2164 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2165 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2166 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2167 }
2168 else
2169 {
2170 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2171 /** @todo check this on AMD-V */
2172 pSReg->u64Base = 0;
2173 pSReg->u32Limit = 0;
2174 }
2175}
2176
2177
2178/**
2179 * Loads a segment selector during a task switch in V8086 mode.
2180 *
2181 * @param pSReg Pointer to the segment register.
2182 * @param uSel The selector value to load.
2183 */
2184DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2185{
2186 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2187 pSReg->Sel = uSel;
2188 pSReg->ValidSel = uSel;
2189 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2190 pSReg->u64Base = uSel << 4;
2191 pSReg->u32Limit = 0xffff;
2192 pSReg->Attr.u = 0xf3;
2193}
2194
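/* E.g. loading DS with 00b8h through the helper above yields Sel=ValidSel=00b8h,
   u64Base=00b80h (selector << 4), u32Limit=0ffffh and Attr=f3h, i.e. a present,
   DPL=3, accessed read/write data segment -- the canonical V8086 segment shape.
   (Illustrative selector value only.) */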
2195
2196/**
2197 * Loads a segment selector during a task switch in protected mode.
2198 *
2199 * In this task switch scenario, we would throw \#TS exceptions rather than
2200 * \#GPs.
2201 *
2202 * @returns VBox strict status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 * @param pSReg Pointer to the segment register.
2205 * @param uSel The new selector value.
2206 *
2207 * @remarks This does _not_ handle CS or SS.
2208 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2209 */
2210static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2211{
2212 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2213
2214 /* Null data selector. */
2215 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2216 {
2217 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2219 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2220 return VINF_SUCCESS;
2221 }
2222
2223 /* Fetch the descriptor. */
2224 IEMSELDESC Desc;
2225 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2226 if (rcStrict != VINF_SUCCESS)
2227 {
2228 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2229 VBOXSTRICTRC_VAL(rcStrict)));
2230 return rcStrict;
2231 }
2232
2233 /* Must be a data segment or readable code segment. */
2234 if ( !Desc.Legacy.Gen.u1DescType
2235 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2236 {
2237 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2238 Desc.Legacy.Gen.u4Type));
2239 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2240 }
2241
2242 /* Check privileges for data segments and non-conforming code segments. */
2243 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2244 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2245 {
2246 /* The RPL and the new CPL must be less than or equal to the DPL. */
2247 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2248 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2249 {
2250 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2251 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2252 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2253 }
2254 }
2255
2256 /* Is it there? */
2257 if (!Desc.Legacy.Gen.u1Present)
2258 {
2259 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2260 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2261 }
2262
2263 /* The base and limit. */
2264 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2265 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2266
2267 /*
2268 * Ok, everything checked out fine. Now set the accessed bit before
2269 * committing the result into the registers.
2270 */
2271 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2272 {
2273 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2274 if (rcStrict != VINF_SUCCESS)
2275 return rcStrict;
2276 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2277 }
2278
2279 /* Commit */
2280 pSReg->Sel = uSel;
2281 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2282 pSReg->u32Limit = cbLimit;
2283 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2284 pSReg->ValidSel = uSel;
2285 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2286 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2287 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2288
2289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2291 return VINF_SUCCESS;
2292}
2293
2294
2295/**
2296 * Performs a task switch.
2297 *
2298 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2299 * caller is responsible for performing the necessary checks (like DPL, TSS
2300 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2301 * reference for JMP, CALL, IRET.
2302 *
2303 * If the task switch is due to a software interrupt or hardware exception,
2304 * the caller is responsible for validating the TSS selector and descriptor. See
2305 * Intel Instruction reference for INT n.
2306 *
2307 * @returns VBox strict status code.
2308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2309 * @param enmTaskSwitch The cause of the task switch.
2310 * @param uNextEip The EIP effective after the task switch.
2311 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2312 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2313 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2314 * @param SelTss The TSS selector of the new task.
2315 * @param pNewDescTss Pointer to the new TSS descriptor.
2316 */
2317VBOXSTRICTRC
2318iemTaskSwitch(PVMCPUCC pVCpu,
2319 IEMTASKSWITCH enmTaskSwitch,
2320 uint32_t uNextEip,
2321 uint32_t fFlags,
2322 uint16_t uErr,
2323 uint64_t uCr2,
2324 RTSEL SelTss,
2325 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2326{
2327 Assert(!IEM_IS_REAL_MODE(pVCpu));
2328 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2329 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2330
2331 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2332 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2333 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2334 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2335 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2336
2337 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2338 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2339
2340 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2341 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2342
2343 /* Update CR2 in case it's a page-fault. */
2344 /** @todo This should probably be done much earlier in IEM/PGM. See
2345 * @bugref{5653#c49}. */
2346 if (fFlags & IEM_XCPT_FLAGS_CR2)
2347 pVCpu->cpum.GstCtx.cr2 = uCr2;
2348
2349 /*
2350 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2351 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2352 */
2353 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2354 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2355 if (uNewTssLimit < uNewTssLimitMin)
2356 {
2357 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2358 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2359 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2360 }
2361
2362 /*
2363 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2364 * The new TSS must have been read and validated (DPL, limits etc.) before a
2365 * task-switch VM-exit commences.
2366 *
2367 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2368 */
2369 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2370 {
2371 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2372 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2373 }
2374
2375 /*
2376 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2377 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2378 */
2379 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2380 {
2381 uint32_t const uExitInfo1 = SelTss;
2382 uint32_t uExitInfo2 = uErr;
2383 switch (enmTaskSwitch)
2384 {
2385 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2386 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2387 default: break;
2388 }
2389 if (fFlags & IEM_XCPT_FLAGS_ERR)
2390 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2391 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2392 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2393
2394 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2395 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2396 RT_NOREF2(uExitInfo1, uExitInfo2);
2397 }
2398
2399 /*
2400 * Check the current TSS limit. The last written byte to the current TSS during the
2401 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2402 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2403 *
2404 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2405 * end up with smaller than "legal" TSS limits.
2406 */
2407 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2408 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2409 if (uCurTssLimit < uCurTssLimitMin)
2410 {
2411 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2412 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2413 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2414 }
2415
2416 /*
2417 * Verify that the new TSS can be accessed and map it. Map only the required contents
2418 * and not the entire TSS.
2419 */
2420 uint8_t bUnmapInfoNewTss;
2421 void *pvNewTss;
2422 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2423 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2424 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2425 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2426 * not perform correct translation if this happens. See Intel spec. 7.2.1
2427 * "Task-State Segment". */
2428 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2429/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2430 * Consider wrapping the remainder into a function for simpler cleanup. */
2431 if (rcStrict != VINF_SUCCESS)
2432 {
2433 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2434 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2435 return rcStrict;
2436 }
2437
2438 /*
2439 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2440 */
2441 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2442 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2443 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2444 {
2445 uint8_t bUnmapInfoDescCurTss;
2446 PX86DESC pDescCurTss;
2447 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2448 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2449 if (rcStrict != VINF_SUCCESS)
2450 {
2451 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2452 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2453 return rcStrict;
2454 }
2455
2456 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2457 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2461 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2466 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2467 {
2468 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2469 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2470 fEFlags &= ~X86_EFL_NT;
2471 }
2472 }
2473
2474 /*
2475 * Save the CPU state into the current TSS.
2476 */
2477 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2478 if (GCPtrNewTss == GCPtrCurTss)
2479 {
2480 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2481 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2482 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2483 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2484 pVCpu->cpum.GstCtx.ldtr.Sel));
2485 }
2486 if (fIsNewTss386)
2487 {
2488 /*
2489 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2490 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2491 */
2492 uint8_t bUnmapInfoCurTss32;
2493 void *pvCurTss32;
2494 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2495 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2496 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2497 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2498 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2499 if (rcStrict != VINF_SUCCESS)
2500 {
2501 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2502 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2503 return rcStrict;
2504 }
2505
2506 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2507 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2508 pCurTss32->eip = uNextEip;
2509 pCurTss32->eflags = fEFlags;
2510 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2511 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2512 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2513 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2514 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2515 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2516 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2517 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2518 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2519 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2520 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2521 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2522 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2523 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2524
2525 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2526 if (rcStrict != VINF_SUCCESS)
2527 {
2528 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2529 VBOXSTRICTRC_VAL(rcStrict)));
2530 return rcStrict;
2531 }
2532 }
2533 else
2534 {
2535 /*
2536 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2537 */
2538 uint8_t bUnmapInfoCurTss16;
2539 void *pvCurTss16;
2540 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2541 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2542 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2543 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2544 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2545 if (rcStrict != VINF_SUCCESS)
2546 {
2547 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2548 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2549 return rcStrict;
2550 }
2551
2552 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2553 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2554 pCurTss16->ip = uNextEip;
2555 pCurTss16->flags = (uint16_t)fEFlags;
2556 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2557 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2558 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2559 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2560 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2561 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2562 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2563 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2564 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2565 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2566 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2567 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2568
2569 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2570 if (rcStrict != VINF_SUCCESS)
2571 {
2572 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2573 VBOXSTRICTRC_VAL(rcStrict)));
2574 return rcStrict;
2575 }
2576 }
2577
2578 /*
2579 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2580 */
2581 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2582 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2583 {
2584 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2585 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2586 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2587 }
2588
2589 /*
2590 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2591 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2592 */
2593 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2594 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2595 bool fNewDebugTrap;
2596 if (fIsNewTss386)
2597 {
2598 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2599 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2600 uNewEip = pNewTss32->eip;
2601 uNewEflags = pNewTss32->eflags;
2602 uNewEax = pNewTss32->eax;
2603 uNewEcx = pNewTss32->ecx;
2604 uNewEdx = pNewTss32->edx;
2605 uNewEbx = pNewTss32->ebx;
2606 uNewEsp = pNewTss32->esp;
2607 uNewEbp = pNewTss32->ebp;
2608 uNewEsi = pNewTss32->esi;
2609 uNewEdi = pNewTss32->edi;
2610 uNewES = pNewTss32->es;
2611 uNewCS = pNewTss32->cs;
2612 uNewSS = pNewTss32->ss;
2613 uNewDS = pNewTss32->ds;
2614 uNewFS = pNewTss32->fs;
2615 uNewGS = pNewTss32->gs;
2616 uNewLdt = pNewTss32->selLdt;
2617 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2618 }
2619 else
2620 {
2621 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2622 uNewCr3 = 0;
2623 uNewEip = pNewTss16->ip;
2624 uNewEflags = pNewTss16->flags;
2625 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2626 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2627 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2628 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2629 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2630 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2631 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2632 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2633 uNewES = pNewTss16->es;
2634 uNewCS = pNewTss16->cs;
2635 uNewSS = pNewTss16->ss;
2636 uNewDS = pNewTss16->ds;
2637 uNewFS = 0;
2638 uNewGS = 0;
2639 uNewLdt = pNewTss16->selLdt;
2640 fNewDebugTrap = false;
2641 }
2642
2643 if (GCPtrNewTss == GCPtrCurTss)
2644 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2645 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2646
2647 /*
2648 * We're done accessing the new TSS.
2649 */
2650 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2651 if (rcStrict != VINF_SUCCESS)
2652 {
2653 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2654 return rcStrict;
2655 }
2656
2657 /*
2658 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2659 */
2660 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2661 {
2662 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2663 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2664 if (rcStrict != VINF_SUCCESS)
2665 {
2666 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2667 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2668 return rcStrict;
2669 }
2670
2671 /* Check that the descriptor indicates the new TSS is available (not busy). */
2672 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2673 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2674 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2675
2676 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2677 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2678 if (rcStrict != VINF_SUCCESS)
2679 {
2680 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2681 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2682 return rcStrict;
2683 }
2684 }
2685
2686 /*
2687 * From this point on, we're technically in the new task. Exceptions raised while completing
2688 * the switch are thus handled in the context of the new task, before it executes any instructions.
2689 */
2690 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2691 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2692 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2693 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2694 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2695 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2696 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2697
2698 /* Set the busy bit in TR. */
2699 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2700
2701 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2702 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2703 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2704 {
2705 uNewEflags |= X86_EFL_NT;
2706 }
2707
2708 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2709 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2710 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2711
2712 pVCpu->cpum.GstCtx.eip = uNewEip;
2713 pVCpu->cpum.GstCtx.eax = uNewEax;
2714 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2715 pVCpu->cpum.GstCtx.edx = uNewEdx;
2716 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2717 pVCpu->cpum.GstCtx.esp = uNewEsp;
2718 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2719 pVCpu->cpum.GstCtx.esi = uNewEsi;
2720 pVCpu->cpum.GstCtx.edi = uNewEdi;
2721
2722 uNewEflags &= X86_EFL_LIVE_MASK;
2723 uNewEflags |= X86_EFL_RA1_MASK;
2724 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2725
2726 /*
2727 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2728 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2729 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
2730 */
2731 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2732 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2733
2734 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2735 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2736
2737 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2738 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2739
2740 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2741 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2742
2743 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2744 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2745
2746 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2747 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2748 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2749
2750 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2751 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2752 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2753 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2754
2755 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2756 {
2757 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2758 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2759 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2760 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2761 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2762 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2763 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2764 }
2765
2766 /*
2767 * Switch CR3 for the new task.
2768 */
2769 if ( fIsNewTss386
2770 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2771 {
2772 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2773 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2774 AssertRCSuccessReturn(rc, rc);
2775
2776 /* Inform PGM. */
2777 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2778 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2779 AssertRCReturn(rc, rc);
2780 /* ignore informational status codes */
2781
2782 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2783 }
2784
2785 /*
2786 * Switch LDTR for the new task.
2787 */
2788 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2789 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2790 else
2791 {
2792 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2793
2794 IEMSELDESC DescNewLdt;
2795 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2796 if (rcStrict != VINF_SUCCESS)
2797 {
2798 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2799 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2800 return rcStrict;
2801 }
2802 if ( !DescNewLdt.Legacy.Gen.u1Present
2803 || DescNewLdt.Legacy.Gen.u1DescType
2804 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2805 {
2806 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2807 uNewLdt, DescNewLdt.Legacy.u));
2808 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2809 }
2810
2811 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2812 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2813 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2814 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2815 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2816 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2817 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2818 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2819 }
2820
2821 IEMSELDESC DescSS;
2822 if (IEM_IS_V86_MODE(pVCpu))
2823 {
2824 IEM_SET_CPL(pVCpu, 3);
2825 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2826 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2827 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2828 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2829 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2830 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2831
2832 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2833 DescSS.Legacy.u = 0;
2834 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2835 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2836 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2837 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2838 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2839 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2840 DescSS.Legacy.Gen.u2Dpl = 3;
2841 }
2842 else
2843 {
2844 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2845
2846 /*
2847 * Load the stack segment for the new task.
2848 */
2849 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2850 {
2851 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2852 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2853 }
2854
2855 /* Fetch the descriptor. */
2856 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2857 if (rcStrict != VINF_SUCCESS)
2858 {
2859 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2860 VBOXSTRICTRC_VAL(rcStrict)));
2861 return rcStrict;
2862 }
2863
2864 /* SS must be a data segment and writable. */
2865 if ( !DescSS.Legacy.Gen.u1DescType
2866 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2867 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2868 {
2869 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2870 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2872 }
2873
2874 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2875 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2876 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2877 {
2878 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2879 uNewCpl));
2880 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2881 }
2882
2883 /* Is it there? */
2884 if (!DescSS.Legacy.Gen.u1Present)
2885 {
2886 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2887 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2888 }
2889
2890 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2891 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2892
2893 /* Set the accessed bit before committing the result into SS. */
2894 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2895 {
2896 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2897 if (rcStrict != VINF_SUCCESS)
2898 return rcStrict;
2899 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2900 }
2901
2902 /* Commit SS. */
2903 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2904 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2905 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2906 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2907 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2908 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2909 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2910
2911 /* CPL has changed, update IEM before loading rest of segments. */
2912 IEM_SET_CPL(pVCpu, uNewCpl);
2913
2914 /*
2915 * Load the data segments for the new task.
2916 */
2917 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2918 if (rcStrict != VINF_SUCCESS)
2919 return rcStrict;
2920 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2921 if (rcStrict != VINF_SUCCESS)
2922 return rcStrict;
2923 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2924 if (rcStrict != VINF_SUCCESS)
2925 return rcStrict;
2926 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2927 if (rcStrict != VINF_SUCCESS)
2928 return rcStrict;
2929
2930 /*
2931 * Load the code segment for the new task.
2932 */
2933 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2934 {
2935 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2936 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2937 }
2938
2939 /* Fetch the descriptor. */
2940 IEMSELDESC DescCS;
2941 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2942 if (rcStrict != VINF_SUCCESS)
2943 {
2944 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2945 return rcStrict;
2946 }
2947
2948 /* CS must be a code segment. */
2949 if ( !DescCS.Legacy.Gen.u1DescType
2950 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2951 {
2952 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2953 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2955 }
2956
2957 /* For conforming CS, DPL must be less than or equal to the RPL. */
2958 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2959 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2960 {
2961 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2962 DescCS.Legacy.Gen.u2Dpl));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* For non-conforming CS, DPL must match RPL. */
2967 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2968 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2969 {
2970 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2971 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2972 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2973 }
2974
2975 /* Is it there? */
2976 if (!DescCS.Legacy.Gen.u1Present)
2977 {
2978 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2979 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2980 }
2981
2982 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2983 u64Base = X86DESC_BASE(&DescCS.Legacy);
2984
2985 /* Set the accessed bit before committing the result into CS. */
2986 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2987 {
2988 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2989 if (rcStrict != VINF_SUCCESS)
2990 return rcStrict;
2991 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2992 }
2993
2994 /* Commit CS. */
2995 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2996 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2997 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2998 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2999 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3000 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3002 }
3003
3004 /* Make sure the CPU mode is correct. */
3005 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3006 if (fExecNew != pVCpu->iem.s.fExec)
3007 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3008 pVCpu->iem.s.fExec = fExecNew;
3009
3010 /** @todo Debug trap. */
3011 if (fIsNewTss386 && fNewDebugTrap)
3012 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3013
3014 /*
3015 * Construct the error code masks based on what caused this task switch.
3016 * See Intel Instruction reference for INT.
3017 */
3018 uint16_t uExt;
3019 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3020 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3021 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3022 uExt = 1;
3023 else
3024 uExt = 0;
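/* uExt ends up as the EXT bit of any error code raised below: it marks the
   fault as caused by an event external to the program (hardware interrupt,
   prior exception or ICEBP) rather than by a software INT n. */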
3025
3026 /*
3027 * Push any error code on to the new stack.
3028 */
3029 if (fFlags & IEM_XCPT_FLAGS_ERR)
3030 {
3031 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3032 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3033 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3034
3035 /* Check that there is sufficient space on the stack. */
3036 /** @todo Factor out segment limit checking for normal/expand down segments
3037 * into a separate function. */
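/* For a normal (expand-up) SS the valid byte offsets are [0, cbLimitSS]; for an
   expand-down SS they are (cbLimitSS, 0xffff] or (cbLimitSS, 0xffffffff]
   depending on SS.D. The two branches below verify that the cbStackFrame bytes
   at ESP-cbStackFrame..ESP-1 fall inside the respective range. */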
3038 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3039 {
3040 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3041 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3042 {
3043 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3044 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3045 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3046 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3047 }
3048 }
3049 else
3050 {
3051 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3052 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3053 {
3054 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3055 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3056 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3057 }
3058 }
3059
3060
3061 if (fIsNewTss386)
3062 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3063 else
3064 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3065 if (rcStrict != VINF_SUCCESS)
3066 {
3067 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3068 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3069 return rcStrict;
3070 }
3071 }
3072
3073 /* Check the new EIP against the new CS limit. */
3074 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3075 {
3076 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3077 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3078 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3079 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3080 }
3081
3082 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3083 pVCpu->cpum.GstCtx.ss.Sel));
3084 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3085}
3086
3087
3088/**
3089 * Implements exceptions and interrupts for protected mode.
3090 *
3091 * @returns VBox strict status code.
3092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3093 * @param cbInstr The number of bytes to offset rIP by in the return
3094 * address.
3095 * @param u8Vector The interrupt / exception vector number.
3096 * @param fFlags The flags.
3097 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3098 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3099 */
3100static VBOXSTRICTRC
3101iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3102 uint8_t cbInstr,
3103 uint8_t u8Vector,
3104 uint32_t fFlags,
3105 uint16_t uErr,
3106 uint64_t uCr2) RT_NOEXCEPT
3107{
3108 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3109
3110 /*
3111 * Read the IDT entry.
3112 */
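/* Protected-mode IDT entries are 8 bytes each, so bytes 8*vector..8*vector+7
   must be covered by the IDT limit (e.g. vector 0x0e requires cbIdt >= 0x77). */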
3113 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3114 {
3115 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3116 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3117 }
3118 X86DESC Idte;
3119 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3120 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3121 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3122 {
3123 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3124 return rcStrict;
3125 }
3126 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3127 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3128 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3129 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3130
3131 /*
3132 * Check the descriptor type, DPL and such.
3133 * ASSUMES this is done in the same order as described for call-gate calls.
3134 */
3135 if (Idte.Gate.u1DescType)
3136 {
3137 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3138 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3139 }
3140 bool fTaskGate = false;
3141 uint8_t f32BitGate = true;
3142 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
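/* TF, NT, RF and VM are always cleared when entering the handler; interrupt
   gates additionally clear IF (added in the gate type switch below), while
   trap gates leave IF as-is. */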
3143 switch (Idte.Gate.u4Type)
3144 {
3145 case X86_SEL_TYPE_SYS_UNDEFINED:
3146 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3147 case X86_SEL_TYPE_SYS_LDT:
3148 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3149 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3150 case X86_SEL_TYPE_SYS_UNDEFINED2:
3151 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3152 case X86_SEL_TYPE_SYS_UNDEFINED3:
3153 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3154 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3155 case X86_SEL_TYPE_SYS_UNDEFINED4:
3156 {
3157 /** @todo check what actually happens when the type is wrong...
3158 * esp. call gates. */
3159 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162
3163 case X86_SEL_TYPE_SYS_286_INT_GATE:
3164 f32BitGate = false;
3165 RT_FALL_THRU();
3166 case X86_SEL_TYPE_SYS_386_INT_GATE:
3167 fEflToClear |= X86_EFL_IF;
3168 break;
3169
3170 case X86_SEL_TYPE_SYS_TASK_GATE:
3171 fTaskGate = true;
3172#ifndef IEM_IMPLEMENTS_TASKSWITCH
3173 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3174#endif
3175 break;
3176
3177 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3178 f32BitGate = false; RT_FALL_THRU();
3179 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3180 break;
3181
3182 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3183 }
3184
3185 /* Check DPL against CPL if applicable. */
3186 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3187 {
3188 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3191 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3192 }
3193 }
3194
3195 /* Is it there? */
3196 if (!Idte.Gate.u1Present)
3197 {
3198 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3199 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3200 }
3201
3202 /* Is it a task-gate? */
3203 if (fTaskGate)
3204 {
3205 /*
3206 * Construct the error code masks based on what caused this task switch.
3207 * See Intel Instruction reference for INT.
3208 */
3209 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3210 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3211 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3212 RTSEL SelTss = Idte.Gate.u16Sel;
3213
3214 /*
3215 * Fetch the TSS descriptor in the GDT.
3216 */
3217 IEMSELDESC DescTSS;
3218 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3219 if (rcStrict != VINF_SUCCESS)
3220 {
3221 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3222 VBOXSTRICTRC_VAL(rcStrict)));
3223 return rcStrict;
3224 }
3225
3226 /* The TSS descriptor must be a system segment and be available (not busy). */
3227 if ( DescTSS.Legacy.Gen.u1DescType
3228 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3229 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3232 u8Vector, SelTss, DescTSS.Legacy.au64));
3233 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3234 }
3235
3236 /* The TSS must be present. */
3237 if (!DescTSS.Legacy.Gen.u1Present)
3238 {
3239 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3240 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3241 }
3242
3243 /* Do the actual task switch. */
3244 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3245 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3246 fFlags, uErr, uCr2, SelTss, &DescTSS);
3247 }
3248
3249 /* A null CS is bad. */
3250 RTSEL NewCS = Idte.Gate.u16Sel;
3251 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3254 return iemRaiseGeneralProtectionFault0(pVCpu);
3255 }
3256
3257 /* Fetch the descriptor for the new CS. */
3258 IEMSELDESC DescCS;
3259 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3260 if (rcStrict != VINF_SUCCESS)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3263 return rcStrict;
3264 }
3265
3266 /* Must be a code segment. */
3267 if (!DescCS.Legacy.Gen.u1DescType)
3268 {
3269 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3270 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3271 }
3272 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3273 {
3274 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3275 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3276 }
3277
3278 /* Don't allow lowering the privilege level. */
3279 /** @todo Does the lowering of privileges apply to software interrupts
3280 * only? This has a bearing on the more-privileged or
3281 * same-privilege stack behavior further down. A testcase would
3282 * be nice. */
3283 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3284 {
3285 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3286 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3287 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3288 }
3289
3290 /* Make sure the selector is present. */
3291 if (!DescCS.Legacy.Gen.u1Present)
3292 {
3293 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3294 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3295 }
3296
3297#ifdef LOG_ENABLED
3298 /* If software interrupt, try decode it if logging is enabled and such. */
3299 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3300 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3301 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3302#endif
3303
3304 /* Check the new EIP against the new CS limit. */
3305 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3306 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3307 ? Idte.Gate.u16OffsetLow
3308 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
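/* 286 interrupt/trap gates only carry a 16-bit offset; 386 gates combine the
   low and high offset words into a 32-bit entry point. */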
3309 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3310 if (uNewEip > cbLimitCS)
3311 {
3312 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3313 u8Vector, uNewEip, cbLimitCS, NewCS));
3314 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3315 }
3316 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3317
3318 /* Calc the flag image to push. */
3319 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3320 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3321 fEfl &= ~X86_EFL_RF;
3322 else
3323 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3324
3325 /* From V8086 mode only go to CPL 0. */
3326 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3327 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
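/* A conforming handler runs at the caller's CPL; a non-conforming one runs at
   its CS.DPL, which the DPL <= CPL check above has already validated. */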
3328 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3329 {
3330 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3331 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3332 }
3333
3334 /*
3335 * If the privilege level changes, we need to get a new stack from the TSS.
3336 * This in turns means validating the new SS and ESP...
3337 */
3338 if (uNewCpl != IEM_GET_CPL(pVCpu))
3339 {
3340 RTSEL NewSS;
3341 uint32_t uNewEsp;
3342 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3343 if (rcStrict != VINF_SUCCESS)
3344 return rcStrict;
3345
3346 IEMSELDESC DescSS;
3347 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3348 if (rcStrict != VINF_SUCCESS)
3349 return rcStrict;
3350 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3351 if (!DescSS.Legacy.Gen.u1DefBig)
3352 {
3353 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3354 uNewEsp = (uint16_t)uNewEsp;
3355 }
3356
3357 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3358
3359 /* Check that there is sufficient space for the stack frame. */
3360 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3361 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3362 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3363 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
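/* The frame is [error code,] EIP, CS, EFLAGS, ESP, SS, plus ES, DS, FS, GS when
   interrupting V8086 code; each slot is 2 bytes for a 16-bit gate and 4 bytes
   for a 32-bit gate, which is what the 10/12 and 18/20 byte figures
   (<< f32BitGate) above encode. */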
3364
3365 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3366 {
3367 if ( uNewEsp - 1 > cbLimitSS
3368 || uNewEsp < cbStackFrame)
3369 {
3370 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3371 u8Vector, NewSS, uNewEsp, cbStackFrame));
3372 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3373 }
3374 }
3375 else
3376 {
3377 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3378 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3379 {
3380 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3381 u8Vector, NewSS, uNewEsp, cbStackFrame));
3382 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3383 }
3384 }
3385
3386 /*
3387 * Start making changes.
3388 */
3389
3390 /* Set the new CPL so that stack accesses use it. */
3391 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3392 IEM_SET_CPL(pVCpu, uNewCpl);
3393
3394 /* Create the stack frame. */
3395 uint8_t bUnmapInfoStackFrame;
3396 RTPTRUNION uStackFrame;
3397 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3398 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3399 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402 if (f32BitGate)
3403 {
3404 if (fFlags & IEM_XCPT_FLAGS_ERR)
3405 *uStackFrame.pu32++ = uErr;
3406 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3407 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3408 uStackFrame.pu32[2] = fEfl;
3409 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3410 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3411 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3412 if (fEfl & X86_EFL_VM)
3413 {
3414 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3415 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3416 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3417 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3418 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3419 }
3420 }
3421 else
3422 {
3423 if (fFlags & IEM_XCPT_FLAGS_ERR)
3424 *uStackFrame.pu16++ = uErr;
3425 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3426 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3427 uStackFrame.pu16[2] = fEfl;
3428 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3429 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3430 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3431 if (fEfl & X86_EFL_VM)
3432 {
3433 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3434 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3435 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3436 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3437 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3438 }
3439 }
3440 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3441 if (rcStrict != VINF_SUCCESS)
3442 return rcStrict;
3443
3444 /* Mark the selectors 'accessed' (hope this is the correct time). */
3445 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3446 * after pushing the stack frame? (Write protect the gdt + stack to
3447 * find out.) */
3448 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3449 {
3450 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3451 if (rcStrict != VINF_SUCCESS)
3452 return rcStrict;
3453 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3454 }
3455
3456 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3457 {
3458 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3459 if (rcStrict != VINF_SUCCESS)
3460 return rcStrict;
3461 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3462 }
3463
3464 /*
3465 * Start committing the register changes (joins with the DPL=CPL branch).
3466 */
3467 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3468 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3469 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3470 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3471 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3472 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3473 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3474 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3475 * SP is loaded).
3476 * Need to check the other combinations too:
3477 * - 16-bit TSS, 32-bit handler
3478 * - 32-bit TSS, 16-bit handler */
3479 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3480 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3481 else
3482 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3483
3484 if (fEfl & X86_EFL_VM)
3485 {
3486 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3487 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3488 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3489 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3490 }
3491 }
3492 /*
3493 * Same privilege, no stack change and smaller stack frame.
3494 */
3495 else
3496 {
3497 uint64_t uNewRsp;
3498 uint8_t bUnmapInfoStackFrame;
3499 RTPTRUNION uStackFrame;
3500 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
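/* Same-privilege frame: [error code,] (E)IP, CS and (E)FLAGS only, i.e. 3 or 4
   slots of 2 or 4 bytes depending on the gate size. */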
3501 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3502 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3503 if (rcStrict != VINF_SUCCESS)
3504 return rcStrict;
3505
3506 if (f32BitGate)
3507 {
3508 if (fFlags & IEM_XCPT_FLAGS_ERR)
3509 *uStackFrame.pu32++ = uErr;
3510 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3511 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3512 uStackFrame.pu32[2] = fEfl;
3513 }
3514 else
3515 {
3516 if (fFlags & IEM_XCPT_FLAGS_ERR)
3517 *uStackFrame.pu16++ = uErr;
3518 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3519 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3520 uStackFrame.pu16[2] = fEfl;
3521 }
3522 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525
3526 /* Mark the CS selector as 'accessed'. */
3527 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3528 {
3529 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3530 if (rcStrict != VINF_SUCCESS)
3531 return rcStrict;
3532 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3533 }
3534
3535 /*
3536 * Start committing the register changes (joins with the other branch).
3537 */
3538 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3539 }
3540
3541 /* ... register committing continues. */
3542 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3543 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3544 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3545 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3546 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3547 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3548
3549 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3550 fEfl &= ~fEflToClear;
3551 IEMMISC_SET_EFL(pVCpu, fEfl);
3552
3553 if (fFlags & IEM_XCPT_FLAGS_CR2)
3554 pVCpu->cpum.GstCtx.cr2 = uCr2;
3555
3556 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3557 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3558
3559 /* Make sure the execution flags are correct. */
3560 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3561 if (fExecNew != pVCpu->iem.s.fExec)
3562 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3563 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3564 pVCpu->iem.s.fExec = fExecNew;
3565 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3566
3567 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3568}
3569
3570
3571/**
3572 * Implements exceptions and interrupts for long mode.
3573 *
3574 * @returns VBox strict status code.
3575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3576 * @param cbInstr The number of bytes to offset rIP by in the return
3577 * address.
3578 * @param u8Vector The interrupt / exception vector number.
3579 * @param fFlags The flags.
3580 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3581 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3582 */
3583static VBOXSTRICTRC
3584iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3585 uint8_t cbInstr,
3586 uint8_t u8Vector,
3587 uint32_t fFlags,
3588 uint16_t uErr,
3589 uint64_t uCr2) RT_NOEXCEPT
3590{
3591 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3592
3593 /*
3594 * Read the IDT entry.
3595 */
3596 uint16_t offIdt = (uint16_t)u8Vector << 4;
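/* Long-mode IDT entries are 16 bytes (64-bit offset plus an IST field), hence
   the << 4 above and the two 8-byte reads below. */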
3597 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3598 {
3599 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3600 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3601 }
3602 X86DESC64 Idte;
3603#ifdef _MSC_VER /* Shut up silly compiler warning. */
3604 Idte.au64[0] = 0;
3605 Idte.au64[1] = 0;
3606#endif
3607 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3608 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3609 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3610 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3611 {
3612 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3613 return rcStrict;
3614 }
3615 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3616 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3617 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3618
3619 /*
3620 * Check the descriptor type, DPL and such.
3621 * ASSUMES this is done in the same order as described for call-gate calls.
3622 */
3623 if (Idte.Gate.u1DescType)
3624 {
3625 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3626 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3627 }
3628 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3629 switch (Idte.Gate.u4Type)
3630 {
3631 case AMD64_SEL_TYPE_SYS_INT_GATE:
3632 fEflToClear |= X86_EFL_IF;
3633 break;
3634 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3635 break;
3636
3637 default:
3638 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3639 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3640 }
3641
3642 /* Check DPL against CPL if applicable. */
3643 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3644 {
3645 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3648 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3649 }
3650 }
3651
3652 /* Is it there? */
3653 if (!Idte.Gate.u1Present)
3654 {
3655 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3656 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658
3659 /* A null CS is bad. */
3660 RTSEL NewCS = Idte.Gate.u16Sel;
3661 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3662 {
3663 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3664 return iemRaiseGeneralProtectionFault0(pVCpu);
3665 }
3666
3667 /* Fetch the descriptor for the new CS. */
3668 IEMSELDESC DescCS;
3669 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3670 if (rcStrict != VINF_SUCCESS)
3671 {
3672 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3673 return rcStrict;
3674 }
3675
3676 /* Must be a 64-bit code segment. */
3677 if (!DescCS.Long.Gen.u1DescType)
3678 {
3679 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3680 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3681 }
3682 if ( !DescCS.Long.Gen.u1Long
3683 || DescCS.Long.Gen.u1DefBig
3684 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3687 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3688 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3689 }
3690
3691 /* Don't allow lowering the privilege level. For non-conforming CS
3692 selectors, the CS.DPL sets the privilege level the trap/interrupt
3693 handler runs at. For conforming CS selectors, the CPL remains
3694 unchanged, but the CS.DPL must be <= CPL. */
3695 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3696 * when CPU in Ring-0. Result \#GP? */
3697 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3698 {
3699 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3700 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3701 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3702 }
3703
3704
3705 /* Make sure the selector is present. */
3706 if (!DescCS.Legacy.Gen.u1Present)
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3709 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3710 }
3711
3712 /* Check that the new RIP is canonical. */
3713 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3714 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3715 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
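/* 64-bit gates split the target RIP across three fields: bits 15:0, 31:16 and
   63:32. */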
3716 if (!IEM_IS_CANONICAL(uNewRip))
3717 {
3718 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3719 return iemRaiseGeneralProtectionFault0(pVCpu);
3720 }
3721
3722 /*
3723 * If the privilege level changes or if the IST isn't zero, we need to get
3724 * a new stack from the TSS.
3725 */
3726 uint64_t uNewRsp;
3727 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3728 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3729 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3730 || Idte.Gate.u3IST != 0)
3731 {
3732 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3733 if (rcStrict != VINF_SUCCESS)
3734 return rcStrict;
3735 }
3736 else
3737 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3738 uNewRsp &= ~(uint64_t)0xf;
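/* In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary before
   pushing the interrupt stack frame. */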
3739
3740 /*
3741 * Calc the flag image to push.
3742 */
3743 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3744 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3745 fEfl &= ~X86_EFL_RF;
3746 else
3747 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3748
3749 /*
3750 * Start making changes.
3751 */
3752 /* Set the new CPL so that stack accesses use it. */
3753 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3754 IEM_SET_CPL(pVCpu, uNewCpl);
3755/** @todo Setting CPL this early seems wrong as it would affect any errors we
3756 * raise accessing the stack and (?) GDT/LDT... */
3757
3758 /* Create the stack frame. */
3759 uint8_t bUnmapInfoStackFrame;
3760 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
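/* Long mode always pushes SS:RSP; the frame is [error code,] RIP, CS, RFLAGS,
   RSP and SS with every slot 8 bytes wide, hence 5 (or 6) * sizeof(uint64_t). */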
3761 RTPTRUNION uStackFrame;
3762 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3763 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3764 if (rcStrict != VINF_SUCCESS)
3765 return rcStrict;
3766
3767 if (fFlags & IEM_XCPT_FLAGS_ERR)
3768 *uStackFrame.pu64++ = uErr;
3769 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3770 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3771 uStackFrame.pu64[2] = fEfl;
3772 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3773 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3774 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777
3778 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3779 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3780 * after pushing the stack frame? (Write protect the gdt + stack to
3781 * find out.) */
3782 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3783 {
3784 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3788 }
3789
3790 /*
3791 * Start committing the register changes.
3792 */
3793 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3794 * hidden registers when interrupting 32-bit or 16-bit code! */
3795 if (uNewCpl != uOldCpl)
3796 {
3797 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3798 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3799 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3800 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3801 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3802 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3803 }
3804 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3805 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3806 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3807 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3808 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3809 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3810 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3811 pVCpu->cpum.GstCtx.rip = uNewRip;
3812
3813 fEfl &= ~fEflToClear;
3814 IEMMISC_SET_EFL(pVCpu, fEfl);
3815
3816 if (fFlags & IEM_XCPT_FLAGS_CR2)
3817 pVCpu->cpum.GstCtx.cr2 = uCr2;
3818
3819 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3820 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3821
3822 iemRecalcExecModeAndCplFlags(pVCpu);
3823
3824 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3825}
3826
3827
3828/**
3829 * Implements exceptions and interrupts.
3830 *
3831 * All exceptions and interrupts go through this function!
3832 *
3833 * @returns VBox strict status code.
3834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3835 * @param cbInstr The number of bytes to offset rIP by in the return
3836 * address.
3837 * @param u8Vector The interrupt / exception vector number.
3838 * @param fFlags The flags.
3839 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3840 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3841 */
3842VBOXSTRICTRC
3843iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3844 uint8_t cbInstr,
3845 uint8_t u8Vector,
3846 uint32_t fFlags,
3847 uint16_t uErr,
3848 uint64_t uCr2) RT_NOEXCEPT
3849{
3850 /*
3851 * Get all the state that we might need here.
3852 */
3853 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3854 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3855
3856#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3857 /*
3858 * Flush prefetch buffer
3859 */
3860 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3861#endif
3862
3863 /*
3864 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3865 */
3866 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3867 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3868 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3869 | IEM_XCPT_FLAGS_BP_INSTR
3870 | IEM_XCPT_FLAGS_ICEBP_INSTR
3871 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3872 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3873 {
3874 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3875 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3876 u8Vector = X86_XCPT_GP;
3877 uErr = 0;
3878 }
3879
3880 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3881#ifdef DBGFTRACE_ENABLED
3882 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3883 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3884 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3885#endif
3886
3887 /*
3888 * Check if DBGF wants to intercept the exception.
3889 */
3890 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3891 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3892 { /* likely */ }
3893 else
3894 {
3895 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3896 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3897 if (rcStrict != VINF_SUCCESS)
3898 return rcStrict;
3899 }
3900
3901 /*
3902 * Evaluate whether NMI blocking should be in effect.
3903 * Normally, NMI blocking is in effect whenever we inject an NMI.
3904 */
3905 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3906 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3907
3908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3909 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3910 {
3911 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3912 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3913 return rcStrict0;
3914
3915 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3916 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3917 {
3918 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3919 fBlockNmi = false;
3920 }
3921 }
3922#endif
3923
3924#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3925 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3926 {
3927 /*
3928 * If the event is being injected as part of VMRUN, it isn't subject to event
3929 * intercepts in the nested-guest. However, secondary exceptions that occur
3930 * during injection of any event -are- subject to exception intercepts.
3931 *
3932 * See AMD spec. 15.20 "Event Injection".
3933 */
3934 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3935 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3936 else
3937 {
3938 /*
3939 * Check and handle if the event being raised is intercepted.
3940 */
3941 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3942 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3943 return rcStrict0;
3944 }
3945 }
3946#endif
3947
3948 /*
3949 * Set NMI blocking if necessary.
3950 */
3951 if (fBlockNmi)
3952 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3953
3954 /*
3955 * Do recursion accounting.
3956 */
3957 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3958 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3959 if (pVCpu->iem.s.cXcptRecursions == 0)
3960 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3961 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3962 else
3963 {
3964 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3965 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3966 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3967
3968 if (pVCpu->iem.s.cXcptRecursions >= 4)
3969 {
3970#ifdef DEBUG_bird
3971 AssertFailed();
3972#endif
3973 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3974 }
3975
3976 /*
3977 * Evaluate the sequence of recurring events.
3978 */
3979 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3980 NULL /* pXcptRaiseInfo */);
3981 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3982 { /* likely */ }
3983 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3984 {
3985 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3986 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3987 u8Vector = X86_XCPT_DF;
3988 uErr = 0;
3989#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3990 /* VMX nested-guest #DF intercept needs to be checked here. */
3991 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3992 {
3993 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3994 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3995 return rcStrict0;
3996 }
3997#endif
3998 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3999 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4000 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4001 }
4002 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4003 {
4004 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4005 return iemInitiateCpuShutdown(pVCpu);
4006 }
4007 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4008 {
4009 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4010 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4011 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4012 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4013 return VERR_EM_GUEST_CPU_HANG;
4014 }
4015 else
4016 {
4017 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4018 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4019 return VERR_IEM_IPE_9;
4020 }
4021
4022 /*
4023 * The 'EXT' bit is set when an exception occurs during delivery of an external
4024 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4025 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
4026 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4027 *
4028 * [1] - Intel spec. 6.13 "Error Code"
4029 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4030 * [3] - Intel Instruction reference for INT n.
4031 */
4032 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4033 && (fFlags & IEM_XCPT_FLAGS_ERR)
4034 && u8Vector != X86_XCPT_PF
4035 && u8Vector != X86_XCPT_DF)
4036 {
4037 uErr |= X86_TRAP_ERR_EXTERNAL;
4038 }
4039 }
4040
4041 pVCpu->iem.s.cXcptRecursions++;
4042 pVCpu->iem.s.uCurXcpt = u8Vector;
4043 pVCpu->iem.s.fCurXcpt = fFlags;
4044 pVCpu->iem.s.uCurXcptErr = uErr;
4045 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4046
4047 /*
4048 * Extensive logging.
4049 */
4050#if defined(LOG_ENABLED) && defined(IN_RING3)
4051 if (LogIs3Enabled())
4052 {
4053 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4054 char szRegs[4096];
4055 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4056 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4057 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4058 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4059 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4060 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4061 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4062 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4063 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4064 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4065 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4066 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4067 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4068 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4069 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4070 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4071 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4072 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4073 " efer=%016VR{efer}\n"
4074 " pat=%016VR{pat}\n"
4075 " sf_mask=%016VR{sf_mask}\n"
4076 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4077 " lstar=%016VR{lstar}\n"
4078 " star=%016VR{star} cstar=%016VR{cstar}\n"
4079 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4080 );
4081
4082 char szInstr[256];
4083 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4084 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4085 szInstr, sizeof(szInstr), NULL);
4086 Log3(("%s%s\n", szRegs, szInstr));
4087 }
4088#endif /* LOG_ENABLED */
4089
4090 /*
4091 * Stats.
4092 */
4093 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4094 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4095 else if (u8Vector <= X86_XCPT_LAST)
4096 {
4097 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4098 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4099 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4100 }
4101
4102 /*
4103 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4104 * to ensure that a stale TLB or paging cache entry will only cause one
4105 * spurious #PF.
4106 */
4107 if ( u8Vector == X86_XCPT_PF
4108 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4109 IEMTlbInvalidatePage(pVCpu, uCr2);
4110
4111 /*
4112 * Call the mode specific worker function.
4113 */
4114 VBOXSTRICTRC rcStrict;
4115 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4116 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4117 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4118 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4119 else
4120 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4121
4122 /* Flush the prefetch buffer. */
4123 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4124
4125 /*
4126 * Unwind.
4127 */
4128 pVCpu->iem.s.cXcptRecursions--;
4129 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4130 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4131 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4132 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4133 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4134 return rcStrict;
4135}
4136
4137#ifdef IEM_WITH_SETJMP
4138/**
4139 * See iemRaiseXcptOrInt. Will not return.
4140 */
4141DECL_NO_RETURN(void)
4142iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4143 uint8_t cbInstr,
4144 uint8_t u8Vector,
4145 uint32_t fFlags,
4146 uint16_t uErr,
4147 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4148{
4149 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4150 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4151}
4152#endif
4153
4154
4155/** \#DE - 00. */
4156VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4157{
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4159}
4160
4161
4162/** \#DB - 01.
4163 * @note This automatically clears DR7.GD. */
4164VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4165{
4166 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4167 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4169}
4170
4171
4172/** \#BR - 05. */
4173VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4174{
4175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4176}
4177
4178
4179/** \#UD - 06. */
4180VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4181{
4182 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4183}
4184
4185
4186/** \#NM - 07. */
4187VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4188{
4189 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4190}
4191
4192
4193/** \#TS(err) - 0a. */
4194VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4195{
4196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4197}
4198
4199
4200/** \#TS(tr) - 0a. */
4201VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4204 pVCpu->cpum.GstCtx.tr.Sel, 0);
4205}
4206
4207
4208/** \#TS(0) - 0a. */
4209VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4210{
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4212 0, 0);
4213}
4214
4215
4216/** \#TS(err) - 0a. */
4217VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4218{
4219 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4220 uSel & X86_SEL_MASK_OFF_RPL, 0);
4221}
4222
4223
4224/** \#NP(err) - 0b. */
4225VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4226{
4227 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4228}
4229
4230
4231/** \#NP(sel) - 0b. */
4232VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4233{
4234 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4237 uSel & ~X86_SEL_RPL, 0);
4238}
4239
4240
4241/** \#SS(seg) - 0c. */
4242VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4243{
4244 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4246 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4247 uSel & ~X86_SEL_RPL, 0);
4248}
4249
4250
4251/** \#SS(err) - 0c. */
4252VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4253{
4254 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4255 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4256 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4257}
4258
4259
4260/** \#GP(n) - 0d. */
4261VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4262{
4263 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4265}
4266
4267
4268/** \#GP(0) - 0d. */
4269VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4270{
4271 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4272 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4273}
4274
4275#ifdef IEM_WITH_SETJMP
4276/** \#GP(0) - 0d. */
4277DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4278{
4279 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4280 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4281}
4282#endif
4283
4284
4285/** \#GP(sel) - 0d. */
4286VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4287{
4288 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4289 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4290 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4291 Sel & ~X86_SEL_RPL, 0);
4292}
4293
4294
4295/** \#GP(0) - 0d. */
4296VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4297{
4298 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4299 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4300}
4301
4302
4303/** \#GP(sel) - 0d. */
4304VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4305{
4306 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4308 NOREF(iSegReg); NOREF(fAccess);
4309 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4310 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4311}
4312
4313#ifdef IEM_WITH_SETJMP
4314/** \#GP(sel) - 0d, longjmp. */
4315DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4316{
4317 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4318 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4319 NOREF(iSegReg); NOREF(fAccess);
4320 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4321 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4322}
4323#endif
4324
4325/** \#GP(sel) - 0d. */
4326VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4327{
4328 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4329 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4330 NOREF(Sel);
4331 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4332}
4333
4334#ifdef IEM_WITH_SETJMP
4335/** \#GP(sel) - 0d, longjmp. */
4336DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4337{
4338 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4339 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4340 NOREF(Sel);
4341 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4342}
4343#endif
4344
4345
4346/** \#GP(sel) - 0d. */
4347VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4348{
4349 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4350 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4351 NOREF(iSegReg); NOREF(fAccess);
4352 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4353}
4354
4355#ifdef IEM_WITH_SETJMP
4356/** \#GP(sel) - 0d, longjmp. */
4357DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4358{
4359 NOREF(iSegReg); NOREF(fAccess);
4360 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4361}
4362#endif
4363
4364
4365/** \#PF(n) - 0e. */
4366VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4367{
4368 uint16_t uErr;
4369 switch (rc)
4370 {
4371 case VERR_PAGE_NOT_PRESENT:
4372 case VERR_PAGE_TABLE_NOT_PRESENT:
4373 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4374 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4375 uErr = 0;
4376 break;
4377
4378 default:
4379 AssertMsgFailed(("%Rrc\n", rc));
4380 RT_FALL_THRU();
4381 case VERR_ACCESS_DENIED:
4382 uErr = X86_TRAP_PF_P;
4383 break;
4384
4385 /** @todo reserved */
4386 }
4387
4388 if (IEM_GET_CPL(pVCpu) == 3)
4389 uErr |= X86_TRAP_PF_US;
4390
4391 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4392 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4393 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4394 uErr |= X86_TRAP_PF_ID;
4395
4396#if 0 /* This is so much nonsense, really. Why was it done like that? */
4397 /* Note! RW access callers reporting a WRITE protection fault, will clear
4398 the READ flag before calling. So, read-modify-write accesses (RW)
4399 can safely be reported as READ faults. */
4400 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4401 uErr |= X86_TRAP_PF_RW;
4402#else
4403 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4404 {
4405 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4406 /// (regardless of outcome of the comparison in the latter case).
4407 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4408 uErr |= X86_TRAP_PF_RW;
4409 }
4410#endif
4411
4412 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4413 of the memory operand rather than at the start of it. (Not sure what
4414 happens if it crosses a page boundary.) The current heuristic for
4415 this is to report the #PF for the last byte if the access is more than
4416 64 bytes. This is probably not correct, but we can work that out later;
4417 the main objective now is to get FXSAVE to work like real hardware and
4418 make bs3-cpu-basic2 work. */
4419 if (cbAccess <= 64)
4420    { /* likely */ }
4421 else
4422 GCPtrWhere += cbAccess - 1;
4423
4424 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4425 uErr, GCPtrWhere);
4426}
4427
4428#ifdef IEM_WITH_SETJMP
4429/** \#PF(n) - 0e, longjmp. */
4430DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4431 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4432{
4433 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4434}
4435#endif
4436
4437
4438/** \#MF(0) - 10. */
4439VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4440{
4441 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4442 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4443
4444 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4445 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4446 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4447}
4448
4449
4450/** \#AC(0) - 11. */
4451VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4452{
4453 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4454}
4455
4456#ifdef IEM_WITH_SETJMP
4457/** \#AC(0) - 11, longjmp. */
4458DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4459{
4460 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4461}
4462#endif
4463
4464
4465/** \#XF(0)/\#XM(0) - 19. */
4466VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4467{
4468 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4469}
4470
4471
4472/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4473IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4474{
4475 NOREF(cbInstr);
4476 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4477}
4478
4479
4480/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4481IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4482{
4483 NOREF(cbInstr);
4484 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4485}
4486
4487
4488/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4489IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4490{
4491 NOREF(cbInstr);
4492 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4493}
4494
4495
4496/** @} */
4497
4498/** @name Common opcode decoders.
4499 * @{
4500 */
4501//#include <iprt/mem.h>
4502
4503/**
4504 * Used to add extra details about a stub case.
4505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4506 */
4507void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4508{
4509#if defined(LOG_ENABLED) && defined(IN_RING3)
4510 PVM pVM = pVCpu->CTX_SUFF(pVM);
4511 char szRegs[4096];
4512 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4513 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4514 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4515 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4516 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4517 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4518 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4519 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4520 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4521 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4522 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4523 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4524 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4525 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4526 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4527 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4528 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4529 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4530 " efer=%016VR{efer}\n"
4531 " pat=%016VR{pat}\n"
4532 " sf_mask=%016VR{sf_mask}\n"
4533 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4534 " lstar=%016VR{lstar}\n"
4535 " star=%016VR{star} cstar=%016VR{cstar}\n"
4536 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4537 );
4538
4539 char szInstr[256];
4540 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4541 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4542 szInstr, sizeof(szInstr), NULL);
4543
4544 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4545#else
4546    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4547#endif
4548}
4549
4550/** @} */
4551
4552
4553
4554/** @name Register Access.
4555 * @{
4556 */
4557
4558/**
4559 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4560 *
4561 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4562 * segment limit.
4563 *
4564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4565 * @param cbInstr Instruction size.
4566 * @param offNextInstr The offset of the next instruction.
4567 * @param enmEffOpSize Effective operand size.
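 *
 * @remarks Minimal call sketch (hypothetical caller; i8Imm stands for the
 *          decoded 8-bit displacement):
 * @code
 *      return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), i8Imm,
 *                                                        pVCpu->iem.s.enmEffOpSize);
 * @endcode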
4568 */
4569VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4570 IEMMODE enmEffOpSize) RT_NOEXCEPT
4571{
4572 switch (enmEffOpSize)
4573 {
4574 case IEMMODE_16BIT:
4575 {
4576 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4577 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4578 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4579 pVCpu->cpum.GstCtx.rip = uNewIp;
4580 else
4581 return iemRaiseGeneralProtectionFault0(pVCpu);
4582 break;
4583 }
4584
4585 case IEMMODE_32BIT:
4586 {
4587 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4588 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4589
4590 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4591 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4592 pVCpu->cpum.GstCtx.rip = uNewEip;
4593 else
4594 return iemRaiseGeneralProtectionFault0(pVCpu);
4595 break;
4596 }
4597
4598 case IEMMODE_64BIT:
4599 {
4600 Assert(IEM_IS_64BIT_CODE(pVCpu));
4601
4602 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4603 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4604 pVCpu->cpum.GstCtx.rip = uNewRip;
4605 else
4606 return iemRaiseGeneralProtectionFault0(pVCpu);
4607 break;
4608 }
4609
4610 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4611 }
4612
4613#ifndef IEM_WITH_CODE_TLB
4614 /* Flush the prefetch buffer. */
4615 pVCpu->iem.s.cbOpcode = cbInstr;
4616#endif
4617
4618 /*
4619 * Clear RF and finish the instruction (maybe raise #DB).
4620 */
4621 return iemRegFinishClearingRF(pVCpu);
4622}
4623
4624
4625/**
4626 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4627 *
4628 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4629 * segment limit.
4630 *
4631 * @returns Strict VBox status code.
4632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4633 * @param cbInstr Instruction size.
4634 * @param offNextInstr The offset of the next instruction.
4635 */
4636VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4637{
4638 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4639
4640 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4641 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4642 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4643 pVCpu->cpum.GstCtx.rip = uNewIp;
4644 else
4645 return iemRaiseGeneralProtectionFault0(pVCpu);
4646
4647#ifndef IEM_WITH_CODE_TLB
4648 /* Flush the prefetch buffer. */
4649 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4650#endif
4651
4652 /*
4653 * Clear RF and finish the instruction (maybe raise #DB).
4654 */
4655 return iemRegFinishClearingRF(pVCpu);
4656}
4657
4658
4659/**
4660 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4661 *
4662 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4663 * segment limit.
4664 *
4665 * @returns Strict VBox status code.
4666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4667 * @param cbInstr Instruction size.
4668 * @param offNextInstr The offset of the next instruction.
4669 * @param enmEffOpSize Effective operand size.
4670 */
4671VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4672 IEMMODE enmEffOpSize) RT_NOEXCEPT
4673{
4674 if (enmEffOpSize == IEMMODE_32BIT)
4675 {
4676 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4677
4678 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4679 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4680 pVCpu->cpum.GstCtx.rip = uNewEip;
4681 else
4682 return iemRaiseGeneralProtectionFault0(pVCpu);
4683 }
4684 else
4685 {
4686 Assert(enmEffOpSize == IEMMODE_64BIT);
4687
4688 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4689 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4690 pVCpu->cpum.GstCtx.rip = uNewRip;
4691 else
4692 return iemRaiseGeneralProtectionFault0(pVCpu);
4693 }
4694
4695#ifndef IEM_WITH_CODE_TLB
4696 /* Flush the prefetch buffer. */
4697 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4698#endif
4699
4700 /*
4701 * Clear RF and finish the instruction (maybe raise #DB).
4702 */
4703 return iemRegFinishClearingRF(pVCpu);
4704}
4705
4706/** @} */
4707
4708
4709/** @name FPU access and helpers.
4710 *
4711 * @{
4712 */
4713
4714/**
4715 * Updates the x87.DS and FPUDP registers.
4716 *
4717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4718 * @param pFpuCtx The FPU context.
4719 * @param iEffSeg The effective segment register.
4720 * @param GCPtrEff The effective address relative to @a iEffSeg.
4721 */
4722DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4723{
4724 RTSEL sel;
4725 switch (iEffSeg)
4726 {
4727 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4728 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4729 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4730 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4731 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4732 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4733 default:
4734 AssertMsgFailed(("%d\n", iEffSeg));
4735 sel = pVCpu->cpum.GstCtx.ds.Sel;
4736 }
4737    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4738 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4739 {
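        /* Real and V8086 mode: FPUDP holds the 32-bit linear address (selector * 16 + offset). */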
4740 pFpuCtx->DS = 0;
4741 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4742 }
4743 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4744 {
4745 pFpuCtx->DS = sel;
4746 pFpuCtx->FPUDP = GCPtrEff;
4747 }
4748 else
4749 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4750}
4751
4752
4753/**
4754 * Rotates the stack registers in the push direction.
4755 *
4756 * @param pFpuCtx The FPU context.
4757 * @remarks This is a complete waste of time, but fxsave stores the registers in
4758 * stack order.
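 *          aRegs[] mirrors ST(i), so a push moves every entry one index up and
 *          the value staged in aRegs[7] by the caller becomes the new ST(0).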
4759 */
4760DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4761{
4762 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4763 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4764 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4765 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4766 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4767 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4768 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4769 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4770 pFpuCtx->aRegs[0].r80 = r80Tmp;
4771}
4772
4773
4774/**
4775 * Rotates the stack registers in the pop direction.
4776 *
4777 * @param pFpuCtx The FPU context.
4778 * @remarks This is a complete waste of time, but fxsave stores the registers in
4779 * stack order.
4780 */
4781DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4782{
4783 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4784 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4785 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4786 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4787 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4788 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4789 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4790 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4791 pFpuCtx->aRegs[7].r80 = r80Tmp;
4792}
4793
4794
4795/**
4796 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4797 * exception prevents it.
4798 *
4799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4800 * @param pResult The FPU operation result to push.
4801 * @param pFpuCtx The FPU context.
4802 */
4803static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4804{
4805 /* Update FSW and bail if there are pending exceptions afterwards. */
4806 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4807 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
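    /* IE, DE and ZE are unmasked when the corresponding IM, DM and ZM bits in
       FCW are clear; the mask bits line up with the status bits. */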
4808 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4809 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4810 {
4811        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4812 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4813 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4814 pFpuCtx->FSW = fFsw;
4815 return;
4816 }
4817
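    /* A push decrements TOP; adding 7 modulo 8 is the same as subtracting one. */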
4818 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4819 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4820 {
4821 /* All is fine, push the actual value. */
4822 pFpuCtx->FTW |= RT_BIT(iNewTop);
4823 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4824 }
4825 else if (pFpuCtx->FCW & X86_FCW_IM)
4826 {
4827 /* Masked stack overflow, push QNaN. */
4828 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4829 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4830 }
4831 else
4832 {
4833 /* Raise stack overflow, don't push anything. */
4834 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4835 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4836 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4837 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4838 return;
4839 }
4840
4841 fFsw &= ~X86_FSW_TOP_MASK;
4842 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4843 pFpuCtx->FSW = fFsw;
4844
4845 iemFpuRotateStackPush(pFpuCtx);
4846 RT_NOREF(pVCpu);
4847}
4848
4849
4850/**
4851 * Stores a result in a FPU register and updates the FSW and FTW.
4852 *
4853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4854 * @param pFpuCtx The FPU context.
4855 * @param pResult The result to store.
4856 * @param iStReg Which FPU register to store it in.
4857 */
4858static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4859{
4860 Assert(iStReg < 8);
4861 uint16_t fNewFsw = pFpuCtx->FSW;
4862 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4863 fNewFsw &= ~X86_FSW_C_MASK;
4864 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4865 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4866 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4867 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4868 pFpuCtx->FSW = fNewFsw;
4869 pFpuCtx->FTW |= RT_BIT(iReg);
4870 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4871 RT_NOREF(pVCpu);
4872}
4873
4874
4875/**
4876 * Only updates the FPU status word (FSW) with the result of the current
4877 * instruction.
4878 *
4879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4880 * @param pFpuCtx The FPU context.
4881 * @param u16FSW The FSW output of the current instruction.
4882 */
4883static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4884{
4885 uint16_t fNewFsw = pFpuCtx->FSW;
4886 fNewFsw &= ~X86_FSW_C_MASK;
4887 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4888 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4889        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4890 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4891 pFpuCtx->FSW = fNewFsw;
4892 RT_NOREF(pVCpu);
4893}
4894
4895
4896/**
4897 * Pops one item off the FPU stack if no pending exception prevents it.
4898 *
4899 * @param pFpuCtx The FPU context.
4900 */
4901static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4902{
4903 /* Check pending exceptions. */
4904 uint16_t uFSW = pFpuCtx->FSW;
4905 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4906 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4907 return;
4908
4909    /* TOP++ - adding 9 modulo 8 increments TOP by one, i.e. pops ST(0). */
4910 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4911 uFSW &= ~X86_FSW_TOP_MASK;
4912 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4913 pFpuCtx->FSW = uFSW;
4914
4915 /* Mark the previous ST0 as empty. */
4916 iOldTop >>= X86_FSW_TOP_SHIFT;
4917 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4918
4919 /* Rotate the registers. */
4920 iemFpuRotateStackPop(pFpuCtx);
4921}
4922
4923
4924/**
4925 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4926 *
4927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4928 * @param pResult The FPU operation result to push.
4929 * @param uFpuOpcode The FPU opcode value.
4930 */
4931void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4932{
4933 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4934 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4935 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4936}
4937
4938
4939/**
4940 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4941 * and sets FPUDP and FPUDS.
4942 *
4943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4944 * @param pResult The FPU operation result to push.
4945 * @param iEffSeg The effective segment register.
4946 * @param GCPtrEff The effective address relative to @a iEffSeg.
4947 * @param uFpuOpcode The FPU opcode value.
4948 */
4949void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4950 uint16_t uFpuOpcode) RT_NOEXCEPT
4951{
4952 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4953 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4954 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4955 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4956}
4957
4958
4959/**
4960 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4961 * unless a pending exception prevents it.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to store and push.
4965 * @param uFpuOpcode The FPU opcode value.
4966 */
4967void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4968{
4969 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4970 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4971
4972 /* Update FSW and bail if there are pending exceptions afterwards. */
4973 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4974 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4975 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4976 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4977 {
4978 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4979 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4980 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4981 pFpuCtx->FSW = fFsw;
4982 return;
4983 }
4984
4985 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4986 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4987 {
4988 /* All is fine, push the actual value. */
4989 pFpuCtx->FTW |= RT_BIT(iNewTop);
4990 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4991 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4992 }
4993 else if (pFpuCtx->FCW & X86_FCW_IM)
4994 {
4995 /* Masked stack overflow, push QNaN. */
4996 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4998 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4999 }
5000 else
5001 {
5002 /* Raise stack overflow, don't push anything. */
5003 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5004 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5005 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5006 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5007 return;
5008 }
5009
5010 fFsw &= ~X86_FSW_TOP_MASK;
5011 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5012 pFpuCtx->FSW = fFsw;
5013
5014 iemFpuRotateStackPush(pFpuCtx);
5015}
5016
5017
5018/**
5019 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5020 * FOP.
5021 *
5022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5023 * @param pResult The result to store.
5024 * @param iStReg Which FPU register to store it in.
5025 * @param uFpuOpcode The FPU opcode value.
5026 */
5027void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5028{
5029 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5030 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5031 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5032}
5033
5034
5035/**
5036 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5037 * FOP, and then pops the stack.
5038 *
5039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5040 * @param pResult The result to store.
5041 * @param iStReg Which FPU register to store it in.
5042 * @param uFpuOpcode The FPU opcode value.
5043 */
5044void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5045{
5046 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5047 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5048 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5049 iemFpuMaybePopOne(pFpuCtx);
5050}
5051
5052
5053/**
5054 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5055 * FPUDP, and FPUDS.
5056 *
5057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5058 * @param pResult The result to store.
5059 * @param iStReg Which FPU register to store it in.
5060 * @param iEffSeg The effective memory operand selector register.
5061 * @param GCPtrEff The effective memory operand offset.
5062 * @param uFpuOpcode The FPU opcode value.
5063 */
5064void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5065 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5066{
5067 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5068 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5069 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5070 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5071}
5072
5073
5074/**
5075 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5076 * FPUDP, and FPUDS, and then pops the stack.
5077 *
5078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5079 * @param pResult The result to store.
5080 * @param iStReg Which FPU register to store it in.
5081 * @param iEffSeg The effective memory operand selector register.
5082 * @param GCPtrEff The effective memory operand offset.
5083 * @param uFpuOpcode The FPU opcode value.
5084 */
5085void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5086 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5087{
5088 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5089 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5090 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5091 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5092 iemFpuMaybePopOne(pFpuCtx);
5093}
5094
5095
5096/**
5097 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5098 *
5099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5100 * @param uFpuOpcode The FPU opcode value.
5101 */
5102void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5103{
5104 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5105 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5106}
5107
5108
5109/**
5110 * Updates the FSW, FOP, FPUIP, and FPUCS.
5111 *
5112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5113 * @param u16FSW The FSW from the current instruction.
5114 * @param uFpuOpcode The FPU opcode value.
5115 */
5116void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5117{
5118 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5119 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5120 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5121}
5122
5123
5124/**
5125 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5126 *
5127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5128 * @param u16FSW The FSW from the current instruction.
5129 * @param uFpuOpcode The FPU opcode value.
5130 */
5131void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5132{
5133 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5134 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5135 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5136 iemFpuMaybePopOne(pFpuCtx);
5137}
5138
5139
5140/**
5141 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5142 *
5143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5144 * @param u16FSW The FSW from the current instruction.
5145 * @param iEffSeg The effective memory operand selector register.
5146 * @param GCPtrEff The effective memory operand offset.
5147 * @param uFpuOpcode The FPU opcode value.
5148 */
5149void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5150{
5151 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5152 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5153 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5154 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5155}
5156
5157
5158/**
5159 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5160 *
5161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5162 * @param u16FSW The FSW from the current instruction.
5163 * @param uFpuOpcode The FPU opcode value.
5164 */
5165void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5166{
5167 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5168 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5169 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5170 iemFpuMaybePopOne(pFpuCtx);
5171 iemFpuMaybePopOne(pFpuCtx);
5172}
5173
5174
5175/**
5176 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5177 *
5178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5179 * @param u16FSW The FSW from the current instruction.
5180 * @param iEffSeg The effective memory operand selector register.
5181 * @param GCPtrEff The effective memory operand offset.
5182 * @param uFpuOpcode The FPU opcode value.
5183 */
5184void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5185 uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5189 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5190 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195/**
5196 * Worker routine for raising an FPU stack underflow exception.
5197 *
5198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5199 * @param pFpuCtx The FPU context.
5200 * @param iStReg The stack register being accessed.
5201 */
5202static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5203{
5204 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5205 if (pFpuCtx->FCW & X86_FCW_IM)
5206 {
5207 /* Masked underflow. */
5208 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5209 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5210 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5211 if (iStReg != UINT8_MAX)
5212 {
5213 pFpuCtx->FTW |= RT_BIT(iReg);
5214 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5215 }
5216 }
5217 else
5218 {
5219 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5220 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5221 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5222 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5223 }
5224 RT_NOREF(pVCpu);
5225}
5226
5227
5228/**
5229 * Raises a FPU stack underflow exception.
5230 *
5231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5232 * @param iStReg The destination register that should be loaded
5233 * with QNaN if \#IS is not masked. Specify
5234 * UINT8_MAX if none (like for fcom).
5235 * @param uFpuOpcode The FPU opcode value.
5236 */
5237void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5238{
5239 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5240 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5241 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5242}
5243
5244
5245void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5246{
5247 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5248 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5249 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5250 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5251}
5252
5253
5254void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5255{
5256 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5257 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5258 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5259 iemFpuMaybePopOne(pFpuCtx);
5260}
5261
5262
5263void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5264 uint16_t uFpuOpcode) RT_NOEXCEPT
5265{
5266 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5267 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5268 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5269 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5270 iemFpuMaybePopOne(pFpuCtx);
5271}
5272
5273
5274void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5278 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5279 iemFpuMaybePopOne(pFpuCtx);
5280 iemFpuMaybePopOne(pFpuCtx);
5281}
5282
5283
5284void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5285{
5286 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5287 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5288
5289 if (pFpuCtx->FCW & X86_FCW_IM)
5290 {
5291        /* Masked underflow - push QNaN. */
5292 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5293 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5294 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5295 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5296 pFpuCtx->FTW |= RT_BIT(iNewTop);
5297 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5298 iemFpuRotateStackPush(pFpuCtx);
5299 }
5300 else
5301 {
5302 /* Exception pending - don't change TOP or the register stack. */
5303 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5304 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5305 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5306 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5307 }
5308}
5309
5310
5311void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5312{
5313 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5314 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5315
5316 if (pFpuCtx->FCW & X86_FCW_IM)
5317 {
5318        /* Masked underflow - push QNaN. */
5319 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5320 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5321 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5322 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5323 pFpuCtx->FTW |= RT_BIT(iNewTop);
5324 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5325 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5326 iemFpuRotateStackPush(pFpuCtx);
5327 }
5328 else
5329 {
5330 /* Exception pending - don't change TOP or the register stack. */
5331 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5332 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5333 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5334 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5335 }
5336}
5337
5338
5339/**
5340 * Worker routine for raising an FPU stack overflow exception on a push.
5341 *
5342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5343 * @param pFpuCtx The FPU context.
5344 */
5345static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5346{
5347 if (pFpuCtx->FCW & X86_FCW_IM)
5348 {
5349 /* Masked overflow. */
5350 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5351 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5352 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5353 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5354 pFpuCtx->FTW |= RT_BIT(iNewTop);
5355 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5356 iemFpuRotateStackPush(pFpuCtx);
5357 }
5358 else
5359 {
5360 /* Exception pending - don't change TOP or the register stack. */
5361 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5362 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5363 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5364 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5365 }
5366 RT_NOREF(pVCpu);
5367}
5368
5369
5370/**
5371 * Raises a FPU stack overflow exception on a push.
5372 *
5373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5374 * @param uFpuOpcode The FPU opcode value.
5375 */
5376void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5377{
5378 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5379 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5380 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5381}
5382
5383
5384/**
5385 * Raises a FPU stack overflow exception on a push with a memory operand.
5386 *
5387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5388 * @param iEffSeg The effective memory operand selector register.
5389 * @param GCPtrEff The effective memory operand offset.
5390 * @param uFpuOpcode The FPU opcode value.
5391 */
5392void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5393{
5394 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5395 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5396 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5397 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5398}
5399
5400/** @} */
5401
5402
5403/** @name SSE+AVX SIMD access and helpers.
5404 *
5405 * @{
5406 */
5407/**
5408 * Stores a result in a SIMD XMM register, updates the MXCSR.
5409 *
5410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5411 * @param pResult The result to store.
5412 * @param iXmmReg Which SIMD XMM register to store the result in.
5413 */
5414void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5415{
5416 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5417 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5418
5419 /* The result is only updated if there is no unmasked exception pending. */
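    /* An exception is unmasked when its status flag is set while the corresponding
       mask bit (the flags shifted left by X86_MXCSR_XCPT_MASK_SHIFT) is clear. */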
5420 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5421 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5422 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5423}
5424
5425
5426/**
5427 * Updates the MXCSR.
5428 *
5429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5430 * @param fMxcsr The new MXCSR value.
5431 */
5432void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5433{
5434 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5435 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5436}
5437/** @} */
5438
5439
5440/** @name Memory access.
5441 *
5442 * @{
5443 */
5444
5445#undef LOG_GROUP
5446#define LOG_GROUP LOG_GROUP_IEM_MEM
5447
5448/**
5449 * Updates the IEMCPU::cbWritten counter if applicable.
5450 *
5451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5452 * @param fAccess The access being accounted for.
5453 * @param cbMem The access size.
5454 */
5455DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5456{
5457 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5458 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5459 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5460}
5461
5462
5463/**
5464 * Applies the segment limit, base and attributes.
5465 *
5466 * This may raise a \#GP or \#SS.
5467 *
5468 * @returns VBox strict status code.
5469 *
5470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5471 * @param fAccess The kind of access which is being performed.
5472 * @param iSegReg The index of the segment register to apply.
5473 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5474 * TSS, ++).
5475 * @param cbMem The access size.
5476 * @param pGCPtrMem Pointer to the guest memory address to apply
5477 * segmentation to. Input and output parameter.
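 *
 * @remarks Minimal call sketch (hypothetical, not lifted from a real caller):
 *          apply DS to an effective address before translating/mapping it:
 * @code
 *      RTGCPTR GCPtrMem = GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA,
 *                                                 X86_SREG_DS, cbMem, &GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode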
5478 */
5479VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5480{
5481 if (iSegReg == UINT8_MAX)
5482 return VINF_SUCCESS;
5483
5484 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5485 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5486 switch (IEM_GET_CPU_MODE(pVCpu))
5487 {
5488 case IEMMODE_16BIT:
5489 case IEMMODE_32BIT:
5490 {
5491 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5492 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5493
5494 if ( pSel->Attr.n.u1Present
5495 && !pSel->Attr.n.u1Unusable)
5496 {
5497 Assert(pSel->Attr.n.u1DescType);
5498 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5499 {
5500 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5501 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5502 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5503
5504 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5505 {
5506 /** @todo CPL check. */
5507 }
5508
5509 /*
5510 * There are two kinds of data selectors, normal and expand down.
5511 */
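                    /* Normal selectors: valid offsets are [0, limit]. Expand-down selectors:
                       (limit, 0xffff], or (limit, 0xffffffff] when the B bit is set. */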
5512 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5513 {
5514 if ( GCPtrFirst32 > pSel->u32Limit
5515 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5516 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5517 }
5518 else
5519 {
5520 /*
5521 * The upper boundary is defined by the B bit, not the G bit!
5522 */
5523 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5524 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5525 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5526 }
5527 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5528 }
5529 else
5530 {
5531 /*
5532                 * A code selector can usually be used to read through it; writing is
5533                 * only permitted in real and V8086 mode.
5534 */
5535 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5536 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5537 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5538 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5539 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5540
5541 if ( GCPtrFirst32 > pSel->u32Limit
5542 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5543 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5544
5545 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5546 {
5547 /** @todo CPL check. */
5548 }
5549
5550 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5551 }
5552 }
5553 else
5554 return iemRaiseGeneralProtectionFault0(pVCpu);
5555 return VINF_SUCCESS;
5556 }
5557
5558 case IEMMODE_64BIT:
5559 {
5560 RTGCPTR GCPtrMem = *pGCPtrMem;
5561 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5562 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5563
5564 Assert(cbMem >= 1);
5565 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5566 return VINF_SUCCESS;
5567 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5568 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5569 return iemRaiseGeneralProtectionFault0(pVCpu);
5570 }
5571
5572 default:
5573 AssertFailedReturn(VERR_IEM_IPE_7);
5574 }
5575}
5576
5577
5578/**
5579 * Translates a virtual address to a physical address and checks if we
5580 * can access the page as specified.
5581 *
5582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5583 * @param GCPtrMem The virtual address.
5584 * @param cbAccess The access size, for raising \#PF correctly for
5585 * FXSAVE and such.
5586 * @param fAccess The intended access.
5587 * @param pGCPhysMem Where to return the physical address.
5588 */
5589VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5590 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5591{
5592 /** @todo Need a different PGM interface here. We're currently using
5593     *        generic / REM interfaces.  This won't cut it for R0. */
5594 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5595 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5596 * here. */
5597 PGMPTWALK Walk;
5598 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5599 if (RT_FAILURE(rc))
5600 {
5601 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5602 /** @todo Check unassigned memory in unpaged mode. */
5603 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5604#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5605 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5606 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5607#endif
5608 *pGCPhysMem = NIL_RTGCPHYS;
5609 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5610 }
5611
5612 /* If the page is writable and does not have the no-exec bit set, all
5613 access is allowed. Otherwise we'll have to check more carefully... */
5614 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5615 {
5616 /* Write to read only memory? */
5617 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5618 && !(Walk.fEffective & X86_PTE_RW)
5619 && ( ( IEM_GET_CPL(pVCpu) == 3
5620 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5621 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5622 {
5623 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5624 *pGCPhysMem = NIL_RTGCPHYS;
5625#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5626 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5627 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5628#endif
5629 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5630 }
5631
5632 /* Kernel memory accessed by userland? */
5633 if ( !(Walk.fEffective & X86_PTE_US)
5634 && IEM_GET_CPL(pVCpu) == 3
5635 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5636 {
5637 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5638 *pGCPhysMem = NIL_RTGCPHYS;
5639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5640 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5641 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5642#endif
5643 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5644 }
5645
5646 /* Executing non-executable memory? */
5647 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5648 && (Walk.fEffective & X86_PTE_PAE_NX)
5649 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5650 {
5651 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5652 *pGCPhysMem = NIL_RTGCPHYS;
5653#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5654 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5655 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5656#endif
5657 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5658 VERR_ACCESS_DENIED);
5659 }
5660 }
5661
5662 /*
5663 * Set the dirty / access flags.
5664     * ASSUMES this is set when the address is translated rather than on commit...
5665 */
5666 /** @todo testcase: check when A and D bits are actually set by the CPU. */
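    /* A write sets both the accessed and dirty bits; reads and code fetches only set the accessed bit. */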
5667 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5668 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5669 {
5670 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5671 AssertRC(rc2);
5672 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5673 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5674 }
5675
5676 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5677 *pGCPhysMem = GCPhys;
5678 return VINF_SUCCESS;
5679}
5680
5681
5682/**
5683 * Looks up a memory mapping entry.
5684 *
5685 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5687 * @param pvMem The memory address.
5688 * @param   fAccess             The access flags (IEM_ACCESS_XXX) to match.
5689 */
5690DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5691{
5692 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5693 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5694 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5695 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5696 return 0;
5697 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5698 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5699 return 1;
5700 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5701 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5702 return 2;
5703 return VERR_NOT_FOUND;
5704}
5705
5706
5707/**
5708 * Finds a free memmap entry when using iNextMapping doesn't work.
5709 *
5710 * @returns Memory mapping index, 1024 on failure.
5711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5712 */
5713static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5714{
5715 /*
5716 * The easy case.
5717 */
5718 if (pVCpu->iem.s.cActiveMappings == 0)
5719 {
5720 pVCpu->iem.s.iNextMapping = 1;
5721 return 0;
5722 }
5723
5724 /* There should be enough mappings for all instructions. */
5725 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5726
5727 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5728 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5729 return i;
5730
5731 AssertFailedReturn(1024);
5732}
5733
5734
5735/**
5736 * Commits a bounce buffer that needs writing back and unmaps it.
5737 *
5738 * @returns Strict VBox status code.
5739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5740 * @param iMemMap The index of the buffer to commit.
5741 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5742 * Always false in ring-3, obviously.
5743 */
5744static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5745{
5746 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5747 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5748#ifdef IN_RING3
5749 Assert(!fPostponeFail);
5750 RT_NOREF_PV(fPostponeFail);
5751#endif
5752
5753 /*
5754 * Do the writing.
5755 */
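    /* A bounce buffer may span two physical pages: cbFirst bytes go to GCPhysFirst
       and cbSecond (possibly zero) bytes to GCPhysSecond. */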
5756 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5757 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5758 {
5759 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5760 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5761 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5762 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5763 {
5764 /*
5765 * Carefully and efficiently dealing with access handler return
5766 * codes make this a little bloated.
5767 */
5768 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5769 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5770 pbBuf,
5771 cbFirst,
5772 PGMACCESSORIGIN_IEM);
5773 if (rcStrict == VINF_SUCCESS)
5774 {
5775 if (cbSecond)
5776 {
5777 rcStrict = PGMPhysWrite(pVM,
5778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5779 pbBuf + cbFirst,
5780 cbSecond,
5781 PGMACCESSORIGIN_IEM);
5782 if (rcStrict == VINF_SUCCESS)
5783 { /* nothing */ }
5784 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5785 {
5786 LogEx(LOG_GROUP_IEM,
5787 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5789 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5790 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5791 }
5792#ifndef IN_RING3
5793 else if (fPostponeFail)
5794 {
5795 LogEx(LOG_GROUP_IEM,
5796 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5797 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5799 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5800 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5801 return iemSetPassUpStatus(pVCpu, rcStrict);
5802 }
5803#endif
5804 else
5805 {
5806 LogEx(LOG_GROUP_IEM,
5807 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5810 return rcStrict;
5811 }
5812 }
5813 }
5814 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5815 {
5816 if (!cbSecond)
5817 {
5818 LogEx(LOG_GROUP_IEM,
5819 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5821 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5822 }
5823 else
5824 {
5825 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5826 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5827 pbBuf + cbFirst,
5828 cbSecond,
5829 PGMACCESSORIGIN_IEM);
5830 if (rcStrict2 == VINF_SUCCESS)
5831 {
5832 LogEx(LOG_GROUP_IEM,
5833 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5835 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5836 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5837 }
5838 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5839 {
5840 LogEx(LOG_GROUP_IEM,
5841 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5842 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5843 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5844 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5845 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5846 }
5847#ifndef IN_RING3
5848 else if (fPostponeFail)
5849 {
5850 LogEx(LOG_GROUP_IEM,
5851 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5854 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5855 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5856 return iemSetPassUpStatus(pVCpu, rcStrict);
5857 }
5858#endif
5859 else
5860 {
5861 LogEx(LOG_GROUP_IEM,
5862 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5865 return rcStrict2;
5866 }
5867 }
5868 }
5869#ifndef IN_RING3
5870 else if (fPostponeFail)
5871 {
5872 LogEx(LOG_GROUP_IEM,
5873 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5875 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5876 if (!cbSecond)
5877 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5878 else
5879 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5880 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5881 return iemSetPassUpStatus(pVCpu, rcStrict);
5882 }
5883#endif
5884 else
5885 {
5886 LogEx(LOG_GROUP_IEM,
5887 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5889 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5890 return rcStrict;
5891 }
5892 }
5893 else
5894 {
5895 /*
5896 * No access handlers, much simpler.
5897 */
5898 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5899 if (RT_SUCCESS(rc))
5900 {
5901 if (cbSecond)
5902 {
5903 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5904 if (RT_SUCCESS(rc))
5905 { /* likely */ }
5906 else
5907 {
5908 LogEx(LOG_GROUP_IEM,
5909 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5912 return rc;
5913 }
5914 }
5915 }
5916 else
5917 {
5918 LogEx(LOG_GROUP_IEM,
5919 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5922 return rc;
5923 }
5924 }
5925 }
5926
5927#if defined(IEM_LOG_MEMORY_WRITES)
5928 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5929 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5930 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5931 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5932 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5933 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5934
5935 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5936 g_cbIemWrote = cbWrote;
5937 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5938#endif
5939
5940 /*
5941 * Free the mapping entry.
5942 */
5943 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5944 Assert(pVCpu->iem.s.cActiveMappings != 0);
5945 pVCpu->iem.s.cActiveMappings--;
5946 return VINF_SUCCESS;
5947}
5948
5949
5950/**
5951 * iemMemMap worker that deals with a request crossing pages.
5952 */
5953static VBOXSTRICTRC
5954iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5955 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5956{
5957 Assert(cbMem <= GUEST_PAGE_SIZE);
5958
5959 /*
5960 * Do the address translations.
5961 */
5962 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5963 RTGCPHYS GCPhysFirst;
5964 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5965 if (rcStrict != VINF_SUCCESS)
5966 return rcStrict;
5967 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5968
5969 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5970 RTGCPHYS GCPhysSecond;
5971 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5972 cbSecondPage, fAccess, &GCPhysSecond);
5973 if (rcStrict != VINF_SUCCESS)
5974 return rcStrict;
5975 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5976 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5977
5978 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5979
5980 /*
5981 * Read in the current memory content if it's a read, execute or partial
5982 * write access.
5983 */
5984 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5985
5986 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5987 {
5988 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5989 {
5990 /*
5991 * Must carefully deal with access handler status codes here,
5992 * makes the code a bit bloated.
5993 */
5994 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5995 if (rcStrict == VINF_SUCCESS)
5996 {
5997 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5998 if (rcStrict == VINF_SUCCESS)
5999 { /*likely */ }
6000 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6001 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6002 else
6003 {
6004                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6005 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6006 return rcStrict;
6007 }
6008 }
6009 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6010 {
6011 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6012 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6013 {
6014 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6015 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6016 }
6017 else
6018 {
6019 LogEx(LOG_GROUP_IEM,
6020                          ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6021                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6022 return rcStrict2;
6023 }
6024 }
6025 else
6026 {
6027                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6028 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6029 return rcStrict;
6030 }
6031 }
6032 else
6033 {
6034 /*
6035             * No informational status codes here, much more straightforward.
6036 */
6037 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6038 if (RT_SUCCESS(rc))
6039 {
6040 Assert(rc == VINF_SUCCESS);
6041 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6042 if (RT_SUCCESS(rc))
6043 Assert(rc == VINF_SUCCESS);
6044 else
6045 {
6046 LogEx(LOG_GROUP_IEM,
6047                       ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6048 return rc;
6049 }
6050 }
6051 else
6052 {
6053 LogEx(LOG_GROUP_IEM,
6054                   ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6055 return rc;
6056 }
6057 }
6058 }
6059#ifdef VBOX_STRICT
6060 else
6061 memset(pbBuf, 0xcc, cbMem);
6062 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6063 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6064#endif
6065 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6066
6067 /*
6068 * Commit the bounce buffer entry.
6069 */
6070 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6071 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6074 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6075 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6076 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6077 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6078 pVCpu->iem.s.cActiveMappings++;
6079
6080 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6081 *ppvMem = pbBuf;
6082 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6083 return VINF_SUCCESS;
6084}
6085
6086
6087/**
6088 * iemMemMap worker that deals with iemMemPageMap failures.
6089 */
6090static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6091 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6092{
6093 /*
6094 * Filter out conditions we can handle and the ones which shouldn't happen.
6095 */
6096 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6097 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6098 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6099 {
6100 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6101 return rcMap;
6102 }
6103 pVCpu->iem.s.cPotentialExits++;
6104
6105 /*
6106 * Read in the current memory content if it's a read, execute or partial
6107 * write access.
6108 */
6109 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6110 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6111 {
6112 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6113 memset(pbBuf, 0xff, cbMem);
6114 else
6115 {
6116 int rc;
6117 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6118 {
6119 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6120 if (rcStrict == VINF_SUCCESS)
6121 { /* nothing */ }
6122 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6123 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6124 else
6125 {
6126 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6127 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6128 return rcStrict;
6129 }
6130 }
6131 else
6132 {
6133 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6134 if (RT_SUCCESS(rc))
6135 { /* likely */ }
6136 else
6137 {
6138                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6139 GCPhysFirst, rc));
6140 return rc;
6141 }
6142 }
6143 }
6144 }
6145#ifdef VBOX_STRICT
6146    else
6147        memset(pbBuf, 0xcc, cbMem);
6150    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6151        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6152#endif
6153
6154 /*
6155 * Commit the bounce buffer entry.
6156 */
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6162 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6163 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6164 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6165 pVCpu->iem.s.cActiveMappings++;
6166
6167 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6168 *ppvMem = pbBuf;
6169 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6170 return VINF_SUCCESS;
6171}
6172
6173
6174
6175/**
6176 * Maps the specified guest memory for the given kind of access.
6177 *
6178 * This may be using bounce buffering of the memory if it's crossing a page
6179 * boundary or if there is an access handler installed for any of it. Because
6180 * of lock prefix guarantees, we're in for some extra clutter when this
6181 * happens.
6182 *
6183 * This may raise a \#GP, \#SS, \#PF or \#AC.
6184 *
6185 * @returns VBox strict status code.
6186 *
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param ppvMem Where to return the pointer to the mapped memory.
6189 * @param pbUnmapInfo Where to return unmap info to be passed to
6190 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6191 * done.
6192 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6193 * 8, 12, 16, 32 or 512. When used by string operations
6194 * it can be up to a page.
6195 * @param iSegReg The index of the segment register to use for this
6196 * access. The base and limits are checked. Use UINT8_MAX
6197 * to indicate that no segmentation is required (for IDT,
6198 * GDT and LDT accesses).
6199 * @param GCPtrMem The address of the guest memory.
6200 * @param fAccess How the memory is being accessed. The
6201 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6202 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6203 * when raising exceptions.
6204 * @param uAlignCtl Alignment control:
6205 * - Bits 15:0 is the alignment mask.
6206 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6207 * IEM_MEMMAP_F_ALIGN_SSE, and
6208 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6209 * Pass zero to skip alignment.
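 *
 * A minimal usage sketch (illustrative only; the local variable names here are
 * assumptions, not code from this file), mirroring how the data fetch helpers
 * further down map, read and unmap a naturally aligned word:
 * @code
 *      uint16_t const *pu16Src;
 *      uint8_t         bUnmapInfo;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint16_t const uValue = *pu16Src;  // use the value...
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 * @endcode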
6210 */
6211VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6212 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6213{
6214 /*
6215 * Check the input and figure out which mapping entry to use.
6216 */
6217 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6218 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6219 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6220    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6221 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6222
6223 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6224 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6225 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6226 {
6227 iMemMap = iemMemMapFindFree(pVCpu);
6228 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6229 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6230 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6231 pVCpu->iem.s.aMemMappings[2].fAccess),
6232 VERR_IEM_IPE_9);
6233 }
6234
6235 /*
6236 * Map the memory, checking that we can actually access it. If something
6237 * slightly complicated happens, fall back on bounce buffering.
6238 */
6239 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6240 if (rcStrict == VINF_SUCCESS)
6241 { /* likely */ }
6242 else
6243 return rcStrict;
6244
6245 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6246 { /* likely */ }
6247 else
6248 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6249
6250 /*
6251 * Alignment check.
6252 */
6253 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6254 { /* likelyish */ }
6255 else
6256 {
6257 /* Misaligned access. */
6258 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6259 {
6260 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6261 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6262 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6263 {
6264 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6265
6266 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6267 return iemRaiseAlignmentCheckException(pVCpu);
6268 }
6269 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6270 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6271 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6272 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6273 * that's what FXSAVE does on a 10980xe. */
6274 && iemMemAreAlignmentChecksEnabled(pVCpu))
6275 return iemRaiseAlignmentCheckException(pVCpu);
6276 else
6277 return iemRaiseGeneralProtectionFault0(pVCpu);
6278 }
6279 }
6280
6281#ifdef IEM_WITH_DATA_TLB
6282 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6283
6284 /*
6285 * Get the TLB entry for this page.
6286 */
6287 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6288 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6289 if (pTlbe->uTag == uTag)
6290 {
6291# ifdef VBOX_WITH_STATISTICS
6292 pVCpu->iem.s.DataTlb.cTlbHits++;
6293# endif
6294 }
6295 else
6296 {
6297 pVCpu->iem.s.DataTlb.cTlbMisses++;
6298 PGMPTWALK Walk;
6299 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6300 if (RT_FAILURE(rc))
6301 {
6302 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6303# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6304 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6305 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6306# endif
6307 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6308 }
6309
6310 Assert(Walk.fSucceeded);
6311 pTlbe->uTag = uTag;
6312 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6313 pTlbe->GCPhys = Walk.GCPhys;
6314 pTlbe->pbMappingR3 = NULL;
6315 }
6316
6317 /*
6318 * Check TLB page table level access flags.
6319 */
6320 /* If the page is either supervisor only or non-writable, we need to do
6321 more careful access checks. */
6322 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6323 {
6324 /* Write to read only memory? */
6325 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6326 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6327 && ( ( IEM_GET_CPL(pVCpu) == 3
6328 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6329 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6330 {
6331 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6332# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6333 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6334 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6335# endif
6336 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6337 }
6338
6339 /* Kernel memory accessed by userland? */
6340 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6341 && IEM_GET_CPL(pVCpu) == 3
6342 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6343 {
6344 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6345# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6346 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6347 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6348# endif
6349 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6350 }
6351 }
6352
6353 /*
6354 * Set the dirty / access flags.
6355 * ASSUMES this is set when the address is translated rather than on commit...
6356 */
6357 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6358 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6359 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6360 {
6361 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6362 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6363 AssertRC(rc2);
6364 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6365 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6366 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6367 }
6368
6369 /*
6370 * Look up the physical page info if necessary.
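     * The TLB entry caches the ring-3 mapping pointer and the physical page
     * flags together with a physical revision; when the revision is stale the
     * info is refreshed from PGM via PGMPhysIemGCPhys2PtrNoLock below.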
6371 */
6372 uint8_t *pbMem = NULL;
6373 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6374# ifdef IN_RING3
6375 pbMem = pTlbe->pbMappingR3;
6376# else
6377 pbMem = NULL;
6378# endif
6379 else
6380 {
6381 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6382 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6383 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6384 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6385 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6386        if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6387 { /* likely */ }
6388 else
6389 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6390 pTlbe->pbMappingR3 = NULL;
6391 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6392 | IEMTLBE_F_NO_MAPPINGR3
6393 | IEMTLBE_F_PG_NO_READ
6394 | IEMTLBE_F_PG_NO_WRITE
6395 | IEMTLBE_F_PG_UNASSIGNED
6396 | IEMTLBE_F_PG_CODE_PAGE);
6397 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6398 &pbMem, &pTlbe->fFlagsAndPhysRev);
6399 AssertRCReturn(rc, rc);
6400# ifdef IN_RING3
6401 pTlbe->pbMappingR3 = pbMem;
6402# endif
6403 }
6404
6405 /*
6406 * Check the physical page level access and mapping.
6407 */
6408 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6409 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6410 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6411 { /* probably likely */ }
6412 else
6413 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6414 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6415 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6416 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6417 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6418 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6419
6420 if (pbMem)
6421 {
6422 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6423 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6424 fAccess |= IEM_ACCESS_NOT_LOCKED;
6425 }
6426 else
6427 {
6428 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6429 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6430 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6431 if (rcStrict != VINF_SUCCESS)
6432 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6433 }
6434
6435 void * const pvMem = pbMem;
6436
6437 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6438 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6439 if (fAccess & IEM_ACCESS_TYPE_READ)
6440 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6441
6442#else /* !IEM_WITH_DATA_TLB */
6443
6444 RTGCPHYS GCPhysFirst;
6445 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6446 if (rcStrict != VINF_SUCCESS)
6447 return rcStrict;
6448
6449 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6450 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6451 if (fAccess & IEM_ACCESS_TYPE_READ)
6452 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6453
6454 void *pvMem;
6455 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6456 if (rcStrict != VINF_SUCCESS)
6457 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6458
6459#endif /* !IEM_WITH_DATA_TLB */
6460
6461 /*
6462 * Fill in the mapping table entry.
6463 */
6464 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6465 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6466 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6467 pVCpu->iem.s.cActiveMappings += 1;
6468
6469 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6470 *ppvMem = pvMem;
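    /* The unmap info byte encodes the mapping index in bits 2:0, an always-set
       marker in bit 3, and the IEM_ACCESS_TYPE_XXX bits in bits 7:4; the
       AssertCompile statements below guard these encoding assumptions. */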
6471 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6472 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6473 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6474
6475 return VINF_SUCCESS;
6476}
6477
6478
6479/**
6480 * Commits the guest memory if bounce buffered and unmaps it.
6481 *
6482 * @returns Strict VBox status code.
6483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6484 * @param bUnmapInfo Unmap info set by iemMemMap.
6485 */
6486VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6487{
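    /* Decode the unmap info (see iemMemMap): bits 2:0 = mapping index, bit 3 =
       validity marker, bits 7:4 = the IEM_ACCESS_TYPE_XXX bits, which are
       cross-checked against the recorded fAccess below. */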
6488 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6489 AssertMsgReturn( (bUnmapInfo & 0x08)
6490 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6491 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6492 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6493 VERR_NOT_FOUND);
6494
6495 /* If it's bounce buffered, we may need to write back the buffer. */
6496 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6497 {
6498 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6499 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6500 }
6501 /* Otherwise unlock it. */
6502 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6503 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6504
6505 /* Free the entry. */
6506 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6507 Assert(pVCpu->iem.s.cActiveMappings != 0);
6508 pVCpu->iem.s.cActiveMappings--;
6509 return VINF_SUCCESS;
6510}
6511
6512
6513/**
6514 * Rolls back the guest memory (conceptually only) and unmaps it.
6515 *
6516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6517 * @param bUnmapInfo Unmap info set by iemMemMap.
6518 */
6519void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6520{
6521 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6522 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6523 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6524 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6525 == ((unsigned)bUnmapInfo >> 4),
6526 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6527
6528 /* Unlock it if necessary. */
6529 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6530 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6531
6532 /* Free the entry. */
6533 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6534 Assert(pVCpu->iem.s.cActiveMappings != 0);
6535 pVCpu->iem.s.cActiveMappings--;
6536}
6537
6538#ifdef IEM_WITH_SETJMP
6539
6540/**
6541 * Maps the specified guest memory for the given kind of access, longjmp on
6542 * error.
6543 *
6544 * This may be using bounce buffering of the memory if it's crossing a page
6545 * boundary or if there is an access handler installed for any of it. Because
6546 * of lock prefix guarantees, we're in for some extra clutter when this
6547 * happens.
6548 *
6549 * This may raise a \#GP, \#SS, \#PF or \#AC.
6550 *
6551 * @returns Pointer to the mapped memory.
6552 *
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 * @param bUnmapInfo Where to return unmap info to be passed to
6555 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6556 * iemMemCommitAndUnmapWoSafeJmp,
6557 * iemMemCommitAndUnmapRoSafeJmp,
6558 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6559 * when done.
6560 * @param cbMem The number of bytes to map. This is usually 1,
6561 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6562 * string operations it can be up to a page.
6563 * @param iSegReg The index of the segment register to use for
6564 * this access. The base and limits are checked.
6565 * Use UINT8_MAX to indicate that no segmentation
6566 * is required (for IDT, GDT and LDT accesses).
6567 * @param GCPtrMem The address of the guest memory.
6568 * @param fAccess How the memory is being accessed. The
6569 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6570 * how to map the memory, while the
6571 * IEM_ACCESS_WHAT_XXX bit is used when raising
6572 * exceptions.
6573 * @param uAlignCtl Alignment control:
6574 * - Bits 15:0 is the alignment mask.
6575 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6576 * IEM_MEMMAP_F_ALIGN_SSE, and
6577 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6578 * Pass zero to skip alignment.
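 *
 * A minimal usage sketch (illustrative only; the variable names are
 * assumptions), mirroring the SSE-aligned fetch helpers further down in this
 * file:
 * @code
 *      RTUINT128U   uValue;
 *      uint8_t      bUnmapInfo;
 *      PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
 *                                                         IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1)
 *                                                         | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 *      uValue.au64[0] = pu128Src->au64[0];
 *      uValue.au64[1] = pu128Src->au64[1];
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 * @endcode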
6579 */
6580void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6581 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6582{
6583 /*
6584 * Check the input, check segment access and adjust address
6585 * with segment base.
6586 */
6587 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6588    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6589 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6590
6591 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6592 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6593 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6594
6595 /*
6596 * Alignment check.
6597 */
6598 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6599 { /* likelyish */ }
6600 else
6601 {
6602 /* Misaligned access. */
6603 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6604 {
6605 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6606 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6607 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6608 {
6609 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6610
6611 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6612 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6613 }
6614 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6615 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6616 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6617 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6618 * that's what FXSAVE does on a 10980xe. */
6619 && iemMemAreAlignmentChecksEnabled(pVCpu))
6620 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6621 else
6622 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6623 }
6624 }
6625
6626 /*
6627 * Figure out which mapping entry to use.
6628 */
6629 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6630 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6631 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6632 {
6633 iMemMap = iemMemMapFindFree(pVCpu);
6634 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6635 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6636 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6637 pVCpu->iem.s.aMemMappings[2].fAccess),
6638 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6639 }
6640
6641 /*
6642 * Crossing a page boundary?
6643 */
6644 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6645 { /* No (likely). */ }
6646 else
6647 {
6648 void *pvMem;
6649 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6650 if (rcStrict == VINF_SUCCESS)
6651 return pvMem;
6652 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6653 }
6654
6655#ifdef IEM_WITH_DATA_TLB
6656 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6657
6658 /*
6659 * Get the TLB entry for this page.
6660 */
6661 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6662 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6663 if (pTlbe->uTag == uTag)
6664 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6665 else
6666 {
6667 pVCpu->iem.s.DataTlb.cTlbMisses++;
6668 PGMPTWALK Walk;
6669 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6670 if (RT_FAILURE(rc))
6671 {
6672 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6673# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6674 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6675 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6676# endif
6677 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6678 }
6679
6680 Assert(Walk.fSucceeded);
6681 pTlbe->uTag = uTag;
6682 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6683 pTlbe->GCPhys = Walk.GCPhys;
6684 pTlbe->pbMappingR3 = NULL;
6685 }
6686
6687 /*
6688 * Check the flags and physical revision.
6689 */
6690 /** @todo make the caller pass these in with fAccess. */
6691 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6692 ? IEMTLBE_F_PT_NO_USER : 0;
6693 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6694 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6695 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6696 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6697 ? IEMTLBE_F_PT_NO_WRITE : 0)
6698 : 0;
6699 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
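    /* Fast path: the single compare below checks both that the physical
       revision is current and that none of the "no access" bits relevant to
       this access (user, write/dirty, read, accessed) are set in the entry. */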
6700 uint8_t *pbMem = NULL;
6701 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6702 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6703# ifdef IN_RING3
6704 pbMem = pTlbe->pbMappingR3;
6705# else
6706 pbMem = NULL;
6707# endif
6708 else
6709 {
6710 /*
6711 * Okay, something isn't quite right or needs refreshing.
6712 */
6713 /* Write to read only memory? */
6714 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6715 {
6716 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6717# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6718 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6719 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6720# endif
6721 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6722 }
6723
6724 /* Kernel memory accessed by userland? */
6725 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6726 {
6727 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6728# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6729 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6730 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6731# endif
6732 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6733 }
6734
6735 /* Set the dirty / access flags.
6736 ASSUMES this is set when the address is translated rather than on commit... */
6737 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6738 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6739 {
6740 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6741 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6742 AssertRC(rc2);
6743 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6744 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6745 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6746 }
6747
6748 /*
6749 * Check if the physical page info needs updating.
6750 */
6751 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6752# ifdef IN_RING3
6753 pbMem = pTlbe->pbMappingR3;
6754# else
6755 pbMem = NULL;
6756# endif
6757 else
6758 {
6759 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6760 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6761 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6762 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6763 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6764 pTlbe->pbMappingR3 = NULL;
6765 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6766 | IEMTLBE_F_NO_MAPPINGR3
6767 | IEMTLBE_F_PG_NO_READ
6768 | IEMTLBE_F_PG_NO_WRITE
6769 | IEMTLBE_F_PG_UNASSIGNED
6770 | IEMTLBE_F_PG_CODE_PAGE);
6771 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6772 &pbMem, &pTlbe->fFlagsAndPhysRev);
6773 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6774# ifdef IN_RING3
6775 pTlbe->pbMappingR3 = pbMem;
6776# endif
6777 }
6778
6779 /*
6780 * Check the physical page level access and mapping.
6781 */
6782 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6783 { /* probably likely */ }
6784 else
6785 {
6786 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6787 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6788 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6789 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6790 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6791 if (rcStrict == VINF_SUCCESS)
6792 return pbMem;
6793 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6794 }
6795 }
6796 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6797
6798 if (pbMem)
6799 {
6800 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6801 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6802 fAccess |= IEM_ACCESS_NOT_LOCKED;
6803 }
6804 else
6805 {
6806 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6807 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6808 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6809        if (rcStrict == VINF_SUCCESS)
6810        { /* likely */ }
6811        else
6812            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6815 }
6816
6817 void * const pvMem = pbMem;
6818
6819 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6820 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6821 if (fAccess & IEM_ACCESS_TYPE_READ)
6822 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6823
6824#else /* !IEM_WITH_DATA_TLB */
6825
6826
6827 RTGCPHYS GCPhysFirst;
6828 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6829 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6830 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6831
6832 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6833 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6834 if (fAccess & IEM_ACCESS_TYPE_READ)
6835 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6836
6837 void *pvMem;
6838 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6839 if (rcStrict == VINF_SUCCESS)
6840 { /* likely */ }
6841 else
6842 {
6843 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6844 if (rcStrict == VINF_SUCCESS)
6845 return pvMem;
6846 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6847 }
6848
6849#endif /* !IEM_WITH_DATA_TLB */
6850
6851 /*
6852 * Fill in the mapping table entry.
6853 */
6854 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6855 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6856 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6857 pVCpu->iem.s.cActiveMappings++;
6858
6859 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6860
6861 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6862 return pvMem;
6863}
6864
6865
6866/**
6867 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6868 *
6869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6870 * @param   pvMem               Unmap info set by iemMemMap or iemMemMapJmp (bUnmapInfo).
6872 */
6873void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6874{
6875 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6876 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6877 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6878 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6879 == ((unsigned)bUnmapInfo >> 4),
6880 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6881
6882 /* If it's bounce buffered, we may need to write back the buffer. */
6883 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6884 {
6885 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6886 {
6887 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6888 if (rcStrict == VINF_SUCCESS)
6889 return;
6890 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6891 }
6892 }
6893 /* Otherwise unlock it. */
6894 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6895 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6896
6897 /* Free the entry. */
6898 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6899 Assert(pVCpu->iem.s.cActiveMappings != 0);
6900 pVCpu->iem.s.cActiveMappings--;
6901}
6902
6903
6904/** Fallback for iemMemCommitAndUnmapRwJmp. */
6905void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6906{
6907 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6908 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6909}
6910
6911
6912/** Fallback for iemMemCommitAndUnmapWoJmp. */
6913void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6914{
6915 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6916 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6917}
6918
6919
6920/** Fallback for iemMemCommitAndUnmapRoJmp. */
6921void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6922{
6923 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
6924 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6925}
6926
6927
6928/** Fallback for iemMemRollbackAndUnmapWo. */
6929void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6930{
6931 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6932 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
6933}
6934
6935#endif /* IEM_WITH_SETJMP */
6936
6937#ifndef IN_RING3
6938/**
6939 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6940 * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
6941 *
6942 * Allows the instruction to be completed and retired, while the IEM user will
6943 * return to ring-3 immediately afterwards and do the postponed writes there.
6944 *
6945 * @returns VBox status code (no strict statuses). Caller must check
6946 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 * @param   bUnmapInfo  Unmap info set by iemMemMap.
6950 */
6951VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6952{
6953 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6954 AssertMsgReturn( (bUnmapInfo & 0x08)
6955 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6956 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6957 == ((unsigned)bUnmapInfo >> 4),
6958 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6959 VERR_NOT_FOUND);
6960
6961 /* If it's bounce buffered, we may need to write back the buffer. */
6962 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6963 {
6964 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6965 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6966 }
6967 /* Otherwise unlock it. */
6968 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6969 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6970
6971 /* Free the entry. */
6972 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6973 Assert(pVCpu->iem.s.cActiveMappings != 0);
6974 pVCpu->iem.s.cActiveMappings--;
6975 return VINF_SUCCESS;
6976}
6977#endif
6978
6979
6980/**
6981 * Rolls back mappings, releasing page locks and such.
6982 *
6983 * The caller shall only call this after checking cActiveMappings.
6984 *
6985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6986 */
6987void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6988{
6989 Assert(pVCpu->iem.s.cActiveMappings > 0);
6990
6991 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6992 while (iMemMap-- > 0)
6993 {
6994 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6995 if (fAccess != IEM_ACCESS_INVALID)
6996 {
6997 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6998 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6999 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7000 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7001 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7002 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7003 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7004 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7005 pVCpu->iem.s.cActiveMappings--;
7006 }
7007 }
7008}
7009
7010
7011/*
7012 * Instantiate R/W templates.
7013 */
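/* Each inclusion of IEMAllMemRWTmpl.cpp.h below expands fetch/store helpers
   (and, while TMPL_MEM_WITH_STACK is defined, stack push/pop helpers) named
   after TMPL_MEM_FN_SUFF, e.g. the iemMemFetchDataU16/U32/U64 helpers used by
   iemMemFetchDataXdtr further down. */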
7014#define TMPL_MEM_WITH_STACK
7015
7016#define TMPL_MEM_TYPE uint8_t
7017#define TMPL_MEM_FN_SUFF U8
7018#define TMPL_MEM_FMT_TYPE "%#04x"
7019#define TMPL_MEM_FMT_DESC "byte"
7020#include "IEMAllMemRWTmpl.cpp.h"
7021
7022#define TMPL_MEM_TYPE uint16_t
7023#define TMPL_MEM_FN_SUFF U16
7024#define TMPL_MEM_FMT_TYPE "%#06x"
7025#define TMPL_MEM_FMT_DESC "word"
7026#include "IEMAllMemRWTmpl.cpp.h"
7027
7028#define TMPL_WITH_PUSH_SREG
7029#define TMPL_MEM_TYPE uint32_t
7030#define TMPL_MEM_FN_SUFF U32
7031#define TMPL_MEM_FMT_TYPE "%#010x"
7032#define TMPL_MEM_FMT_DESC "dword"
7033#include "IEMAllMemRWTmpl.cpp.h"
7034#undef TMPL_WITH_PUSH_SREG
7035
7036#define TMPL_MEM_TYPE uint64_t
7037#define TMPL_MEM_FN_SUFF U64
7038#define TMPL_MEM_FMT_TYPE "%#018RX64"
7039#define TMPL_MEM_FMT_DESC "qword"
7040#include "IEMAllMemRWTmpl.cpp.h"
7041
7042#undef TMPL_MEM_WITH_STACK
7043
7044#define TMPL_MEM_TYPE uint64_t
7045#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7046#define TMPL_MEM_FN_SUFF U64AlignedU128
7047#define TMPL_MEM_FMT_TYPE "%#018RX64"
7048#define TMPL_MEM_FMT_DESC "qword"
7049#include "IEMAllMemRWTmpl.cpp.h"
7050
7051/* See IEMAllMemRWTmplInline.cpp.h */
7052#define TMPL_MEM_BY_REF
7053
7054#define TMPL_MEM_TYPE RTFLOAT80U
7055#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7056#define TMPL_MEM_FN_SUFF R80
7057#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7058#define TMPL_MEM_FMT_DESC "tword"
7059#include "IEMAllMemRWTmpl.cpp.h"
7060
7061#define TMPL_MEM_TYPE RTPBCD80U
7062#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7063#define TMPL_MEM_FN_SUFF D80
7064#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7065#define TMPL_MEM_FMT_DESC "tword"
7066#include "IEMAllMemRWTmpl.cpp.h"
7067
7068#define TMPL_MEM_TYPE RTUINT128U
7069#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7070#define TMPL_MEM_FN_SUFF U128
7071#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7072#define TMPL_MEM_FMT_DESC "dqword"
7073#include "IEMAllMemRWTmpl.cpp.h"
7074
7075
7076/**
7077 * Fetches a data dword and zero extends it to a qword.
7078 *
7079 * @returns Strict VBox status code.
7080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7081 * @param pu64Dst Where to return the qword.
7082 * @param iSegReg The index of the segment register to use for
7083 * this access. The base and limits are checked.
7084 * @param GCPtrMem The address of the guest memory.
7085 */
7086VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7087{
7088 /* The lazy approach for now... */
7089 uint8_t bUnmapInfo;
7090 uint32_t const *pu32Src;
7091 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7092 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7093 if (rc == VINF_SUCCESS)
7094 {
7095 *pu64Dst = *pu32Src;
7096 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7097 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7098 }
7099 return rc;
7100}
7101
7102
7103#ifdef SOME_UNUSED_FUNCTION
7104/**
7105 * Fetches a data dword and sign extends it to a qword.
7106 *
7107 * @returns Strict VBox status code.
7108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7109 * @param pu64Dst Where to return the sign extended value.
7110 * @param iSegReg The index of the segment register to use for
7111 * this access. The base and limits are checked.
7112 * @param GCPtrMem The address of the guest memory.
7113 */
7114VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7115{
7116 /* The lazy approach for now... */
7117 uint8_t bUnmapInfo;
7118 int32_t const *pi32Src;
7119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7120 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7121 if (rc == VINF_SUCCESS)
7122 {
7123 *pu64Dst = *pi32Src;
7124 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7125 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7126 }
7127#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7128 else
7129 *pu64Dst = 0;
7130#endif
7131 return rc;
7132}
7133#endif
7134
7135
7136/**
7137 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7138 * related.
7139 *
7140 * Raises \#GP(0) if not aligned.
7141 *
7142 * @returns Strict VBox status code.
7143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7144 * @param   pu128Dst            Where to return the double qword.
7145 * @param iSegReg The index of the segment register to use for
7146 * this access. The base and limits are checked.
7147 * @param GCPtrMem The address of the guest memory.
7148 */
7149VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7150{
7151 /* The lazy approach for now... */
7152 uint8_t bUnmapInfo;
7153 PCRTUINT128U pu128Src;
7154 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7155 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7156 if (rc == VINF_SUCCESS)
7157 {
7158 pu128Dst->au64[0] = pu128Src->au64[0];
7159 pu128Dst->au64[1] = pu128Src->au64[1];
7160 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7161 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7162 }
7163 return rc;
7164}
7165
7166
7167#ifdef IEM_WITH_SETJMP
7168/**
7169 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7170 * related, longjmp on error.
7171 *
7172 * Raises \#GP(0) if not aligned.
7173 *
7174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7175 * @param   pu128Dst            Where to return the double qword.
7176 * @param iSegReg The index of the segment register to use for
7177 * this access. The base and limits are checked.
7178 * @param GCPtrMem The address of the guest memory.
7179 */
7180void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7181 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7182{
7183 /* The lazy approach for now... */
7184 uint8_t bUnmapInfo;
7185 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7186 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7187 pu128Dst->au64[0] = pu128Src->au64[0];
7188 pu128Dst->au64[1] = pu128Src->au64[1];
7189 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7190 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7191}
7192#endif
7193
7194
7195/**
7196 * Fetches a data oword (octo word), generally AVX related.
7197 *
7198 * @returns Strict VBox status code.
7199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7200 * @param   pu256Dst            Where to return the octo word.
7201 * @param iSegReg The index of the segment register to use for
7202 * this access. The base and limits are checked.
7203 * @param GCPtrMem The address of the guest memory.
7204 */
7205VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7206{
7207 /* The lazy approach for now... */
7208 uint8_t bUnmapInfo;
7209 PCRTUINT256U pu256Src;
7210 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7211 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7212 if (rc == VINF_SUCCESS)
7213 {
7214 pu256Dst->au64[0] = pu256Src->au64[0];
7215 pu256Dst->au64[1] = pu256Src->au64[1];
7216 pu256Dst->au64[2] = pu256Src->au64[2];
7217 pu256Dst->au64[3] = pu256Src->au64[3];
7218 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7219 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7220 }
7221 return rc;
7222}
7223
7224
7225#ifdef IEM_WITH_SETJMP
7226/**
7227 * Fetches a data oword (octo word), generally AVX related.
7228 *
7229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7230 * @param   pu256Dst            Where to return the octo word.
7231 * @param iSegReg The index of the segment register to use for
7232 * this access. The base and limits are checked.
7233 * @param GCPtrMem The address of the guest memory.
7234 */
7235void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7236{
7237 /* The lazy approach for now... */
7238 uint8_t bUnmapInfo;
7239 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7240 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7241 pu256Dst->au64[0] = pu256Src->au64[0];
7242 pu256Dst->au64[1] = pu256Src->au64[1];
7243 pu256Dst->au64[2] = pu256Src->au64[2];
7244 pu256Dst->au64[3] = pu256Src->au64[3];
7245 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7246 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7247}
7248#endif
7249
7250
7251/**
7252 * Fetches a data oword (octo word) at an aligned address, generally AVX
7253 * related.
7254 *
7255 * Raises \#GP(0) if not aligned.
7256 *
7257 * @returns Strict VBox status code.
7258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7259 * @param   pu256Dst            Where to return the octo word.
7260 * @param iSegReg The index of the segment register to use for
7261 * this access. The base and limits are checked.
7262 * @param GCPtrMem The address of the guest memory.
7263 */
7264VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7265{
7266 /* The lazy approach for now... */
7267 uint8_t bUnmapInfo;
7268 PCRTUINT256U pu256Src;
7269 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7270 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7271 if (rc == VINF_SUCCESS)
7272 {
7273 pu256Dst->au64[0] = pu256Src->au64[0];
7274 pu256Dst->au64[1] = pu256Src->au64[1];
7275 pu256Dst->au64[2] = pu256Src->au64[2];
7276 pu256Dst->au64[3] = pu256Src->au64[3];
7277 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7278 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7279 }
7280 return rc;
7281}
7282
7283
7284#ifdef IEM_WITH_SETJMP
7285/**
7286 * Fetches a data oword (octo word) at an aligned address, generally AVX
7287 * related, longjmp on error.
7288 *
7289 * Raises \#GP(0) if not aligned.
7290 *
7291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7292 * @param   pu256Dst            Where to return the octo word.
7293 * @param iSegReg The index of the segment register to use for
7294 * this access. The base and limits are checked.
7295 * @param GCPtrMem The address of the guest memory.
7296 */
7297void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7298 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7299{
7300 /* The lazy approach for now... */
7301 uint8_t bUnmapInfo;
7302 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7303 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7304 pu256Dst->au64[0] = pu256Src->au64[0];
7305 pu256Dst->au64[1] = pu256Src->au64[1];
7306 pu256Dst->au64[2] = pu256Src->au64[2];
7307 pu256Dst->au64[3] = pu256Src->au64[3];
7308 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7309 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7310}
7311#endif
7312
7313
7314
7315/**
7316 * Fetches a descriptor register (lgdt, lidt).
7317 *
7318 * @returns Strict VBox status code.
7319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7320 * @param pcbLimit Where to return the limit.
7321 * @param pGCPtrBase Where to return the base.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 * @param enmOpSize The effective operand size.
7326 */
7327VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7328 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7329{
7330 /*
7331 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7332 * little special:
7333 * - The two reads are done separately.
7334     *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7335 * - We suspect the 386 to actually commit the limit before the base in
7336 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7337     *        don't try to emulate this eccentric behavior, because it's not well
7338 * enough understood and rather hard to trigger.
7339 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7340 */
7341 VBOXSTRICTRC rcStrict;
7342 if (IEM_IS_64BIT_CODE(pVCpu))
7343 {
7344 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7345 if (rcStrict == VINF_SUCCESS)
7346 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7347 }
7348 else
7349 {
7350 uint32_t uTmp = 0; /* (silences a Visual C++ potentially-used-uninitialized warning) */
7351 if (enmOpSize == IEMMODE_32BIT)
7352 {
7353 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7354 {
7355 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7356 if (rcStrict == VINF_SUCCESS)
7357 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7358 }
7359 else
7360 {
7361 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7362 if (rcStrict == VINF_SUCCESS)
7363 {
7364 *pcbLimit = (uint16_t)uTmp;
7365 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7366 }
7367 }
7368 if (rcStrict == VINF_SUCCESS)
7369 *pGCPtrBase = uTmp;
7370 }
7371 else
7372 {
7373 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7374 if (rcStrict == VINF_SUCCESS)
7375 {
7376 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7377 if (rcStrict == VINF_SUCCESS)
7378 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7379 }
7380 }
7381 }
7382 return rcStrict;
7383}
7384
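/*
 * Illustrative sketch of the pseudo-descriptor image consumed above (the
 * struct is hypothetical, for documentation only):
 *
 *      #pragma pack(1)
 *      typedef struct XDTRFETCHSKETCH
 *      {
 *          uint16_t cbLimit;   // offset 0: the 16-bit limit, always read first
 *          uint64_t uBase;     // offset 2: the base; a dword outside 64-bit mode, a qword in 64-bit mode
 *      } XDTRFETCHSKETCH;
 *      #pragma pack()
 *
 * With a 16-bit operand size only the low 24 bits of the base are kept
 * (uTmp & 0x00ffffff), matching the legacy LGDT/LIDT handling above.
 */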
7385
7386/**
7387 * Stores a data dqword, SSE aligned.
7388 *
7389 * @returns Strict VBox status code.
7390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7391 * @param iSegReg The index of the segment register to use for
7392 * this access. The base and limits are checked.
7393 * @param GCPtrMem The address of the guest memory.
7394 * @param u128Value The value to store.
7395 */
7396VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7397{
7398 /* The lazy approach for now... */
7399 uint8_t bUnmapInfo;
7400 PRTUINT128U pu128Dst;
7401 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7402 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7403 if (rc == VINF_SUCCESS)
7404 {
7405 pu128Dst->au64[0] = u128Value.au64[0];
7406 pu128Dst->au64[1] = u128Value.au64[1];
7407 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7408 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7409 }
7410 return rc;
7411}
7412
7413
7414#ifdef IEM_WITH_SETJMP
7415/**
7416 * Stores a data dqword, SSE aligned, longjmp on error.
7417 *
7418 * Raises \#GP(0) if not aligned.
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param iSegReg The index of the segment register to use for
7421 * this access. The base and limits are checked.
7422 * @param GCPtrMem The address of the guest memory.
7423 * @param u128Value The value to store.
7424 */
7425void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7426 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7427{
7428 /* The lazy approach for now... */
7429 uint8_t bUnmapInfo;
7430 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7431 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7432 pu128Dst->au64[0] = u128Value.au64[0];
7433 pu128Dst->au64[1] = u128Value.au64[1];
7434 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7435 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7436}
7437#endif
7438
7439
7440/**
7441 * Stores a data qqword.
7442 *
7443 * @returns Strict VBox status code.
7444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7445 * @param iSegReg The index of the segment register to use for
7446 * this access. The base and limits are checked.
7447 * @param GCPtrMem The address of the guest memory.
7448 * @param pu256Value Pointer to the value to store.
7449 */
7450VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7451{
7452 /* The lazy approach for now... */
7453 uint8_t bUnmapInfo;
7454 PRTUINT256U pu256Dst;
7455 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7456 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7457 if (rc == VINF_SUCCESS)
7458 {
7459 pu256Dst->au64[0] = pu256Value->au64[0];
7460 pu256Dst->au64[1] = pu256Value->au64[1];
7461 pu256Dst->au64[2] = pu256Value->au64[2];
7462 pu256Dst->au64[3] = pu256Value->au64[3];
7463 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7464 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7465 }
7466 return rc;
7467}
7468
7469
7470#ifdef IEM_WITH_SETJMP
7471/**
7472 * Stores a data qqword, longjmp on error.
7473 *
7474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7475 * @param iSegReg The index of the segment register to use for
7476 * this access. The base and limits are checked.
7477 * @param GCPtrMem The address of the guest memory.
7478 * @param pu256Value Pointer to the value to store.
7479 */
7480void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7481{
7482 /* The lazy approach for now... */
7483 uint8_t bUnmapInfo;
7484 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7485 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7486 pu256Dst->au64[0] = pu256Value->au64[0];
7487 pu256Dst->au64[1] = pu256Value->au64[1];
7488 pu256Dst->au64[2] = pu256Value->au64[2];
7489 pu256Dst->au64[3] = pu256Value->au64[3];
7490 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7491 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7492}
7493#endif
7494
7495
7496/**
7497 * Stores a data qqword, AVX \#GP(0) aligned.
7498 *
7499 * @returns Strict VBox status code.
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param iSegReg The index of the segment register to use for
7502 * this access. The base and limits are checked.
7503 * @param GCPtrMem The address of the guest memory.
7504 * @param pu256Value Pointer to the value to store.
7505 */
7506VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7507{
7508 /* The lazy approach for now... */
7509 uint8_t bUnmapInfo;
7510 PRTUINT256U pu256Dst;
7511 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7512 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7513 if (rc == VINF_SUCCESS)
7514 {
7515 pu256Dst->au64[0] = pu256Value->au64[0];
7516 pu256Dst->au64[1] = pu256Value->au64[1];
7517 pu256Dst->au64[2] = pu256Value->au64[2];
7518 pu256Dst->au64[3] = pu256Value->au64[3];
7519 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7520 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7521 }
7522 return rc;
7523}
7524
7525
7526#ifdef IEM_WITH_SETJMP
7527/**
7528 * Stores a data qqword, AVX aligned, longjmp on error.
7529 *
7530 * Raises \#GP(0) if not aligned.
7531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7532 * @param iSegReg The index of the segment register to use for
7533 * this access. The base and limits are checked.
7534 * @param GCPtrMem The address of the guest memory.
7535 * @param pu256Value Pointer to the value to store.
7536 */
7537void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7538 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7539{
7540 /* The lazy approach for now... */
7541 uint8_t bUnmapInfo;
7542 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7543 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7544 pu256Dst->au64[0] = pu256Value->au64[0];
7545 pu256Dst->au64[1] = pu256Value->au64[1];
7546 pu256Dst->au64[2] = pu256Value->au64[2];
7547 pu256Dst->au64[3] = pu256Value->au64[3];
7548 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7549 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7550}
7551#endif
7552
7553
7554/**
7555 * Stores a descriptor register (sgdt, sidt).
7556 *
7557 * @returns Strict VBox status code.
7558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7559 * @param cbLimit The limit.
7560 * @param GCPtrBase The base address.
7561 * @param iSegReg The index of the segment register to use for
7562 * this access. The base and limits are checked.
7563 * @param GCPtrMem The address of the guest memory.
7564 */
7565VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7566{
7567 /*
7568 * The SIDT and SGDT instructions actually store the data using two
7569 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7570 * do not respond to operand-size prefixes.
7571 */
7572 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7573 if (rcStrict == VINF_SUCCESS)
7574 {
7575 if (IEM_IS_16BIT_CODE(pVCpu))
7576 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7577 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7578 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7579 else if (IEM_IS_32BIT_CODE(pVCpu))
7580 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7581 else
7582 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7583 }
7584 return rcStrict;
7585}
7586
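/*
 * Worked example of the two writes above (sketch): for a 16-bit code segment
 * on a 286-or-older target CPU, cbLimit=0x07ff and GCPtrBase=0x00123456
 * produce the 6-byte image
 *
 *      ff 07 56 34 12 ff
 *
 * i.e. the limit word, the 24-bit base, and 0xff in the top base byte because
 * the code ORs in 0xff000000 for those targets.  On later targets that byte
 * holds the real base bits 31:24 (and 64-bit code stores the full qword base).
 */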
7587
7588/**
7589 * Begin a special stack push (used by interrupts, exceptions and such).
7590 *
7591 * This will raise \#SS or \#PF if appropriate.
7592 *
7593 * @returns Strict VBox status code.
7594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7595 * @param cbMem The number of bytes to push onto the stack.
7596 * @param cbAlign The alignment mask (7, 3, 1).
7597 * @param ppvMem Where to return the pointer to the stack memory.
7598 * As with the other memory functions this could be
7599 * direct access or bounce buffered access, so
7600 * don't commit registers until the commit call
7601 * succeeds.
7602 * @param pbUnmapInfo Where to store unmap info for
7603 * iemMemStackPushCommitSpecial.
7604 * @param puNewRsp Where to return the new RSP value. This must be
7605 * passed unchanged to
7606 * iemMemStackPushCommitSpecial().
7607 */
7608VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7609 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7610{
7611 Assert(cbMem < UINT8_MAX);
7612 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7613 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7614}
7615
7616
7617/**
7618 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7619 *
7620 * This will update the rSP.
7621 *
7622 * @returns Strict VBox status code.
7623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7624 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7625 * @param uNewRsp The new RSP value returned by
7626 * iemMemStackPushBeginSpecial().
7627 */
7628VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7629{
7630 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7631 if (rcStrict == VINF_SUCCESS)
7632 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7633 return rcStrict;
7634}
7635
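/*
 * Typical usage sketch for the special push API (illustrative only; uErrCode
 * stands in for whatever a caller, e.g. an exception dispatcher, is pushing):
 *
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(*pu32Dst), 3,
 *                                                          (void **)&pu32Dst, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = uErrCode;    // write into the mapped (possibly bounce buffered) stack memory
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits and updates RSP
 *      }
 */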
7636
7637/**
7638 * Begin a special stack pop (used by iret, retf and such).
7639 *
7640 * This will raise \#SS or \#PF if appropriate.
7641 *
7642 * @returns Strict VBox status code.
7643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7644 * @param cbMem The number of bytes to pop from the stack.
7645 * @param cbAlign The alignment mask (7, 3, 1).
7646 * @param ppvMem Where to return the pointer to the stack memory.
7647 * @param pbUnmapInfo Where to store unmap info for
7648 * iemMemStackPopDoneSpecial.
7649 * @param puNewRsp Where to return the new RSP value. This must be
7650 * assigned to CPUMCTX::rsp manually some time
7651 * after iemMemStackPopDoneSpecial() has been
7652 * called.
7653 */
7654VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7655 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7656{
7657 Assert(cbMem < UINT8_MAX);
7658 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7659 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7660}
7661
7662
7663/**
7664 * Continue a special stack pop (used by iret and retf), for the purpose of
7665 * retrieving a new stack pointer.
7666 *
7667 * This will raise \#SS or \#PF if appropriate.
7668 *
7669 * @returns Strict VBox status code.
7670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7671 * @param off Offset from the top of the stack. This is zero
7672 * except in the retf case.
7673 * @param cbMem The number of bytes to pop from the stack.
7674 * @param ppvMem Where to return the pointer to the stack memory.
7675 * @param pbUnmapInfo Where to store unmap info for
7676 * iemMemStackPopDoneSpecial.
7677 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7678 * return this because all use of this function is
7679 * to retrieve a new value and anything we return
7680 * here would be discarded.)
7681 */
7682VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7683 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7684{
7685 Assert(cbMem < UINT8_MAX);
7686
7687 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7688 RTGCPTR GCPtrTop;
7689 if (IEM_IS_64BIT_CODE(pVCpu))
7690 GCPtrTop = uCurNewRsp;
7691 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7692 GCPtrTop = (uint32_t)uCurNewRsp;
7693 else
7694 GCPtrTop = (uint16_t)uCurNewRsp;
7695
7696 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7697 0 /* checked in iemMemStackPopBeginSpecial */);
7698}
7699
7700
7701/**
7702 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7703 * iemMemStackPopContinueSpecial).
7704 *
7705 * The caller will manually commit the rSP.
7706 *
7707 * @returns Strict VBox status code.
7708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7709 * @param bUnmapInfo Unmap information returned by
7710 * iemMemStackPopBeginSpecial() or
7711 * iemMemStackPopContinueSpecial().
7712 */
7713VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7714{
7715 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7716}
7717
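/*
 * Usage sketch for the special pop API (illustrative only): map, copy, unmap,
 * and only then commit RSP manually as the function documentation requires:
 *
 *      uint8_t         bUnmapInfo;
 *      uint64_t        uNewRsp;
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*pu32Src), 3,
 *                                                            (void const **)&pu32Src, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;       // the popped value (hypothetical use)
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;   // the caller commits RSP itself
 *      }
 */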
7718
7719/**
7720 * Fetches a system table byte.
7721 *
7722 * @returns Strict VBox status code.
7723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7724 * @param pbDst Where to return the byte.
7725 * @param iSegReg The index of the segment register to use for
7726 * this access. The base and limits are checked.
7727 * @param GCPtrMem The address of the guest memory.
7728 */
7729VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7730{
7731 /* The lazy approach for now... */
7732 uint8_t bUnmapInfo;
7733 uint8_t const *pbSrc;
7734 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7735 if (rc == VINF_SUCCESS)
7736 {
7737 *pbDst = *pbSrc;
7738 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7739 }
7740 return rc;
7741}
7742
7743
7744/**
7745 * Fetches a system table word.
7746 *
7747 * @returns Strict VBox status code.
7748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7749 * @param pu16Dst Where to return the word.
7750 * @param iSegReg The index of the segment register to use for
7751 * this access. The base and limits are checked.
7752 * @param GCPtrMem The address of the guest memory.
7753 */
7754VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7755{
7756 /* The lazy approach for now... */
7757 uint8_t bUnmapInfo;
7758 uint16_t const *pu16Src;
7759 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7760 if (rc == VINF_SUCCESS)
7761 {
7762 *pu16Dst = *pu16Src;
7763 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7764 }
7765 return rc;
7766}
7767
7768
7769/**
7770 * Fetches a system table dword.
7771 *
7772 * @returns Strict VBox status code.
7773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7774 * @param pu32Dst Where to return the dword.
7775 * @param iSegReg The index of the segment register to use for
7776 * this access. The base and limits are checked.
7777 * @param GCPtrMem The address of the guest memory.
7778 */
7779VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7780{
7781 /* The lazy approach for now... */
7782 uint8_t bUnmapInfo;
7783 uint32_t const *pu32Src;
7784 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7785 if (rc == VINF_SUCCESS)
7786 {
7787 *pu32Dst = *pu32Src;
7788 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7789 }
7790 return rc;
7791}
7792
7793
7794/**
7795 * Fetches a system table qword.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7799 * @param pu64Dst Where to return the qword.
7800 * @param iSegReg The index of the segment register to use for
7801 * this access. The base and limits are checked.
7802 * @param GCPtrMem The address of the guest memory.
7803 */
7804VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7805{
7806 /* The lazy approach for now... */
7807 uint8_t bUnmapInfo;
7808 uint64_t const *pu64Src;
7809 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7810 if (rc == VINF_SUCCESS)
7811 {
7812 *pu64Dst = *pu64Src;
7813 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7814 }
7815 return rc;
7816}
7817
7818
7819/**
7820 * Fetches a descriptor table entry with caller specified error code.
7821 *
7822 * @returns Strict VBox status code.
7823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7824 * @param pDesc Where to return the descriptor table entry.
7825 * @param uSel The selector which table entry to fetch.
7826 * @param uXcpt The exception to raise on table lookup error.
7827 * @param uErrorCode The error code associated with the exception.
7828 */
7829static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7830 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7831{
7832 AssertPtr(pDesc);
7833 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7834
7835 /** @todo did the 286 require all 8 bytes to be accessible? */
7836 /*
7837 * Get the selector table base and check bounds.
7838 */
7839 RTGCPTR GCPtrBase;
7840 if (uSel & X86_SEL_LDT)
7841 {
7842 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7843 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7844 {
7845 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7846 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7847 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7848 uErrorCode, 0);
7849 }
7850
7851 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7852 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7853 }
7854 else
7855 {
7856 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7857 {
7858 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7859 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7860 uErrorCode, 0);
7861 }
7862 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7863 }
7864
7865 /*
7866 * Read the legacy descriptor and maybe the long mode extensions if
7867 * required.
7868 */
7869 VBOXSTRICTRC rcStrict;
7870 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7871 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7872 else
7873 {
7874 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7875 if (rcStrict == VINF_SUCCESS)
7876 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7877 if (rcStrict == VINF_SUCCESS)
7878 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7879 if (rcStrict == VINF_SUCCESS)
7880 pDesc->Legacy.au16[3] = 0;
7881 else
7882 return rcStrict;
7883 }
7884
7885 if (rcStrict == VINF_SUCCESS)
7886 {
7887 if ( !IEM_IS_LONG_MODE(pVCpu)
7888 || pDesc->Legacy.Gen.u1DescType)
7889 pDesc->Long.au64[1] = 0;
7890 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7891 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7892 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7893 else
7894 {
7895 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7896 /** @todo is this the right exception? */
7897 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7898 }
7899 }
7900 return rcStrict;
7901}
7902
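/*
 * For reference (sketch): a selector value decomposes into the pieces the
 * bounds checks above operate on, using the x86.h masks:
 *
 *      uint16_t const offEntry = uSel & X86_SEL_MASK;          // byte offset of the entry in the table (index * 8)
 *      bool const     fLdt     = RT_BOOL(uSel & X86_SEL_LDT);  // table indicator: clear = GDT, set = LDT
 *      uint8_t  const uRpl     = uSel & X86_SEL_RPL;           // requested privilege level
 *
 * The (uSel | X86_SEL_RPL_LDT) expressions used above yield the offset of the
 * last byte of the 8-byte entry, which is what gets compared to the limit.
 */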
7903
7904/**
7905 * Fetches a descriptor table entry.
7906 *
7907 * @returns Strict VBox status code.
7908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7909 * @param pDesc Where to return the descriptor table entry.
7910 * @param uSel The selector which table entry to fetch.
7911 * @param uXcpt The exception to raise on table lookup error.
7912 */
7913VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7914{
7915 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7916}
7917
7918
7919/**
7920 * Marks the selector descriptor as accessed (only non-system descriptors).
7921 *
7922 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7923 * will therefore skip the limit checks.
7924 *
7925 * @returns Strict VBox status code.
7926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7927 * @param uSel The selector.
7928 */
7929VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7930{
7931 /*
7932 * Get the selector table base and calculate the entry address.
7933 */
7934 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7935 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7936 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7937 GCPtr += uSel & X86_SEL_MASK;
7938
7939 /*
7940 * ASMAtomicBitSet will assert if the address is misaligned, so we do some
7941 * ugly juggling to avoid that. This makes sure the access is atomic and
7942 * more or less removes any question about 8-bit vs 32-bit accesses.
7943 */
7944 VBOXSTRICTRC rcStrict;
7945 uint8_t bUnmapInfo;
7946 uint32_t volatile *pu32;
7947 if ((GCPtr & 3) == 0)
7948 {
7949 /* The normal case, map the 32-bit bits around the accessed bit (40). */
7950 GCPtr += 2 + 2;
7951 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7952 if (rcStrict != VINF_SUCCESS)
7953 return rcStrict;
7954 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7955 }
7956 else
7957 {
7958 /* The misaligned GDT/LDT case, map the whole thing. */
7959 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7960 if (rcStrict != VINF_SUCCESS)
7961 return rcStrict;
7962 switch ((uintptr_t)pu32 & 3)
7963 {
7964 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7965 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7966 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7967 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7968 }
7969 }
7970
7971 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7972}
7973
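/*
 * Bit arithmetic behind the aligned case above (illustrative): the accessed
 * bit is bit 40 of the 8-byte descriptor, i.e. bit 0 of the type byte at
 * offset 5.  Mapping the dword at offset 4 therefore places it at
 *
 *      40 - 4 * 8 = 8
 *
 * which is the bit index passed to ASMAtomicBitSet, and also what the
 * byte-wise offsets in the misaligned case reduce to.
 */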
7974
7975#undef LOG_GROUP
7976#define LOG_GROUP LOG_GROUP_IEM
7977
7978/** @} */
7979
7980/** @name Opcode Helpers.
7981 * @{
7982 */
7983
7984/**
7985 * Calculates the effective address of a ModR/M memory operand.
7986 *
7987 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7988 *
7989 * @return Strict VBox status code.
7990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7991 * @param bRm The ModRM byte.
7992 * @param cbImmAndRspOffset - First byte: The size of any immediate
7993 * following the effective address opcode bytes
7994 * (only for RIP relative addressing).
7995 * - Second byte: RSP displacement (for POP [ESP]).
7996 * @param pGCPtrEff Where to return the effective address.
7997 */
7998VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
7999{
8000 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8001# define SET_SS_DEF() \
8002 do \
8003 { \
8004 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8005 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8006 } while (0)
8007
8008 if (!IEM_IS_64BIT_CODE(pVCpu))
8009 {
8010/** @todo Check the effective address size crap! */
8011 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8012 {
8013 uint16_t u16EffAddr;
8014
8015 /* Handle the disp16 form with no registers first. */
8016 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8017 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8018 else
8019 {
8020 /* Get the displacement. */
8021 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8022 {
8023 case 0: u16EffAddr = 0; break;
8024 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8025 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8026 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8027 }
8028
8029 /* Add the base and index registers to the disp. */
8030 switch (bRm & X86_MODRM_RM_MASK)
8031 {
8032 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8033 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8034 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8035 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8036 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8037 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8038 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8039 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8040 }
8041 }
8042
8043 *pGCPtrEff = u16EffAddr;
8044 }
8045 else
8046 {
8047 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8048 uint32_t u32EffAddr;
8049
8050 /* Handle the disp32 form with no registers first. */
8051 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8052 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8053 else
8054 {
8055 /* Get the register (or SIB) value. */
8056 switch ((bRm & X86_MODRM_RM_MASK))
8057 {
8058 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8059 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8060 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8061 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8062 case 4: /* SIB */
8063 {
8064 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8065
8066 /* Get the index and scale it. */
8067 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8068 {
8069 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8070 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8071 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8072 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8073 case 4: u32EffAddr = 0; /*none */ break;
8074 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8075 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8076 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8077 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8078 }
8079 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8080
8081 /* add base */
8082 switch (bSib & X86_SIB_BASE_MASK)
8083 {
8084 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8085 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8086 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8087 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8088 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8089 case 5:
8090 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8091 {
8092 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8093 SET_SS_DEF();
8094 }
8095 else
8096 {
8097 uint32_t u32Disp;
8098 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8099 u32EffAddr += u32Disp;
8100 }
8101 break;
8102 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8103 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8104 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8105 }
8106 break;
8107 }
8108 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8109 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8110 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8111 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8112 }
8113
8114 /* Get and add the displacement. */
8115 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8116 {
8117 case 0:
8118 break;
8119 case 1:
8120 {
8121 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8122 u32EffAddr += i8Disp;
8123 break;
8124 }
8125 case 2:
8126 {
8127 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8128 u32EffAddr += u32Disp;
8129 break;
8130 }
8131 default:
8132 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8133 }
8134
8135 }
8136 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8137 *pGCPtrEff = u32EffAddr;
8138 }
8139 }
8140 else
8141 {
8142 uint64_t u64EffAddr;
8143
8144 /* Handle the rip+disp32 form with no registers first. */
8145 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8146 {
8147 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8148 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8149 }
8150 else
8151 {
8152 /* Get the register (or SIB) value. */
8153 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8154 {
8155 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8156 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8157 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8158 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8159 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8160 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8161 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8162 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8163 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8164 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8165 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8166 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8167 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8168 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8169 /* SIB */
8170 case 4:
8171 case 12:
8172 {
8173 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8174
8175 /* Get the index and scale it. */
8176 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8177 {
8178 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8179 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8180 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8181 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8182 case 4: u64EffAddr = 0; /*none */ break;
8183 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8184 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8185 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8186 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8187 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8188 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8189 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8190 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8191 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8192 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8193 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8194 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8195 }
8196 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8197
8198 /* add base */
8199 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8200 {
8201 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8202 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8203 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8204 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8205 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8206 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8207 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8208 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8209 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8210 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8211 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8212 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8213 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8214 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8215 /* complicated encodings */
8216 case 5:
8217 case 13:
8218 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8219 {
8220 if (!pVCpu->iem.s.uRexB)
8221 {
8222 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8223 SET_SS_DEF();
8224 }
8225 else
8226 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8227 }
8228 else
8229 {
8230 uint32_t u32Disp;
8231 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8232 u64EffAddr += (int32_t)u32Disp;
8233 }
8234 break;
8235 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8236 }
8237 break;
8238 }
8239 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8240 }
8241
8242 /* Get and add the displacement. */
8243 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8244 {
8245 case 0:
8246 break;
8247 case 1:
8248 {
8249 int8_t i8Disp;
8250 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8251 u64EffAddr += i8Disp;
8252 break;
8253 }
8254 case 2:
8255 {
8256 uint32_t u32Disp;
8257 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8258 u64EffAddr += (int32_t)u32Disp;
8259 break;
8260 }
8261 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8262 }
8263
8264 }
8265
8266 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8267 *pGCPtrEff = u64EffAddr;
8268 else
8269 {
8270 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8271 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8272 }
8273 }
8274
8275 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8276 return VINF_SUCCESS;
8277}
8278
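/*
 * Worked example for the 32-bit path above (sketch): with bRm=0x44 (mod=1,
 * rm=4, so a SIB byte and a disp8 follow), bSib=0xb3 (scale=2, index=esi,
 * base=ebx) and disp8=0x10, the function computes
 *
 *      *pGCPtrEff = ebx + (esi << 2) + (int8_t)0x10
 *
 * with DS remaining the default segment since neither EBP nor ESP is used as
 * the base (SET_SS_DEF is not invoked).
 */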
8279
8280#ifdef IEM_WITH_SETJMP
8281/**
8282 * Calculates the effective address of a ModR/M memory operand.
8283 *
8284 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8285 *
8286 * May longjmp on internal error.
8287 *
8288 * @return The effective address.
8289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8290 * @param bRm The ModRM byte.
8291 * @param cbImmAndRspOffset - First byte: The size of any immediate
8292 * following the effective address opcode bytes
8293 * (only for RIP relative addressing).
8294 * - Second byte: RSP displacement (for POP [ESP]).
8295 */
8296RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8297{
8298 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8299# define SET_SS_DEF() \
8300 do \
8301 { \
8302 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8303 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8304 } while (0)
8305
8306 if (!IEM_IS_64BIT_CODE(pVCpu))
8307 {
8308/** @todo Check the effective address size crap! */
8309 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8310 {
8311 uint16_t u16EffAddr;
8312
8313 /* Handle the disp16 form with no registers first. */
8314 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8315 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8316 else
8317 {
8318 /* Get the displacement. */
8319 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8320 {
8321 case 0: u16EffAddr = 0; break;
8322 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8323 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8324 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8325 }
8326
8327 /* Add the base and index registers to the disp. */
8328 switch (bRm & X86_MODRM_RM_MASK)
8329 {
8330 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8331 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8332 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8333 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8334 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8335 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8336 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8337 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8338 }
8339 }
8340
8341 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8342 return u16EffAddr;
8343 }
8344
8345 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8346 uint32_t u32EffAddr;
8347
8348 /* Handle the disp32 form with no registers first. */
8349 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8350 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8351 else
8352 {
8353 /* Get the register (or SIB) value. */
8354 switch ((bRm & X86_MODRM_RM_MASK))
8355 {
8356 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8357 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8358 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8359 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8360 case 4: /* SIB */
8361 {
8362 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8363
8364 /* Get the index and scale it. */
8365 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8366 {
8367 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8368 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8369 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8370 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8371 case 4: u32EffAddr = 0; /*none */ break;
8372 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8373 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8374 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8375 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8376 }
8377 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8378
8379 /* add base */
8380 switch (bSib & X86_SIB_BASE_MASK)
8381 {
8382 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8383 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8384 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8385 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8386 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8387 case 5:
8388 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8389 {
8390 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8391 SET_SS_DEF();
8392 }
8393 else
8394 {
8395 uint32_t u32Disp;
8396 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8397 u32EffAddr += u32Disp;
8398 }
8399 break;
8400 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8401 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8402 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8403 }
8404 break;
8405 }
8406 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8407 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8408 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8409 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8410 }
8411
8412 /* Get and add the displacement. */
8413 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8414 {
8415 case 0:
8416 break;
8417 case 1:
8418 {
8419 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8420 u32EffAddr += i8Disp;
8421 break;
8422 }
8423 case 2:
8424 {
8425 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8426 u32EffAddr += u32Disp;
8427 break;
8428 }
8429 default:
8430 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8431 }
8432 }
8433
8434 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8435 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8436 return u32EffAddr;
8437 }
8438
8439 uint64_t u64EffAddr;
8440
8441 /* Handle the rip+disp32 form with no registers first. */
8442 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8443 {
8444 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8445 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8446 }
8447 else
8448 {
8449 /* Get the register (or SIB) value. */
8450 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8451 {
8452 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8453 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8454 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8455 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8456 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8457 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8458 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8459 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8460 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8461 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8462 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8463 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8464 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8465 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8466 /* SIB */
8467 case 4:
8468 case 12:
8469 {
8470 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8471
8472 /* Get the index and scale it. */
8473 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8474 {
8475 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8476 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8477 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8478 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8479 case 4: u64EffAddr = 0; /*none */ break;
8480 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8481 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8482 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8483 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8484 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8485 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8486 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8487 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8488 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8489 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8490 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8491 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8492 }
8493 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8494
8495 /* add base */
8496 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8497 {
8498 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8499 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8500 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8501 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8502 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8503 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8504 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8505 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8506 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8507 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8508 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8509 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8510 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8511 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8512 /* complicated encodings */
8513 case 5:
8514 case 13:
8515 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8516 {
8517 if (!pVCpu->iem.s.uRexB)
8518 {
8519 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8520 SET_SS_DEF();
8521 }
8522 else
8523 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8524 }
8525 else
8526 {
8527 uint32_t u32Disp;
8528 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8529 u64EffAddr += (int32_t)u32Disp;
8530 }
8531 break;
8532 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8533 }
8534 break;
8535 }
8536 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8537 }
8538
8539 /* Get and add the displacement. */
8540 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8541 {
8542 case 0:
8543 break;
8544 case 1:
8545 {
8546 int8_t i8Disp;
8547 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8548 u64EffAddr += i8Disp;
8549 break;
8550 }
8551 case 2:
8552 {
8553 uint32_t u32Disp;
8554 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8555 u64EffAddr += (int32_t)u32Disp;
8556 break;
8557 }
8558 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8559 }
8560
8561 }
8562
8563 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8564 {
8565 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8566 return u64EffAddr;
8567 }
8568 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8569 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8570 return u64EffAddr & UINT32_MAX;
8571}
8572#endif /* IEM_WITH_SETJMP */
8573
8574
8575/**
8576 * Calculates the effective address of a ModR/M memory operand, extended version
8577 * for use in the recompilers.
8578 *
8579 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8580 *
8581 * @return Strict VBox status code.
8582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8583 * @param bRm The ModRM byte.
8584 * @param cbImmAndRspOffset - First byte: The size of any immediate
8585 * following the effective address opcode bytes
8586 * (only for RIP relative addressing).
8587 * - Second byte: RSP displacement (for POP [ESP]).
8588 * @param pGCPtrEff Where to return the effective address.
8589 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8590 * SIB byte (bits 39:32).
8591 */
8592VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8593{
8594 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8595# define SET_SS_DEF() \
8596 do \
8597 { \
8598 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8599 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8600 } while (0)
8601
8602 uint64_t uInfo;
8603 if (!IEM_IS_64BIT_CODE(pVCpu))
8604 {
8605/** @todo Check the effective address size crap! */
8606 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8607 {
8608 uint16_t u16EffAddr;
8609
8610 /* Handle the disp16 form with no registers first. */
8611 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8612 {
8613 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8614 uInfo = u16EffAddr;
8615 }
8616 else
8617 {
8618 /* Get the displacement. */
8619 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8620 {
8621 case 0: u16EffAddr = 0; break;
8622 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8623 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8624 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8625 }
8626 uInfo = u16EffAddr;
8627
8628 /* Add the base and index registers to the disp. */
8629 switch (bRm & X86_MODRM_RM_MASK)
8630 {
8631 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8632 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8633 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8634 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8635 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8636 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8637 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8638 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8639 }
8640 }
8641
8642 *pGCPtrEff = u16EffAddr;
8643 }
8644 else
8645 {
8646 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8647 uint32_t u32EffAddr;
8648
8649 /* Handle the disp32 form with no registers first. */
8650 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8651 {
8652 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8653 uInfo = u32EffAddr;
8654 }
8655 else
8656 {
8657 /* Get the register (or SIB) value. */
8658 uInfo = 0;
8659 switch ((bRm & X86_MODRM_RM_MASK))
8660 {
8661 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8662 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8663 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8664 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8665 case 4: /* SIB */
8666 {
8667 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8668 uInfo = (uint64_t)bSib << 32;
8669
8670 /* Get the index and scale it. */
8671 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8672 {
8673 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8674 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8675 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8676 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8677 case 4: u32EffAddr = 0; /*none */ break;
8678 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8679 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8680 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8681 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8682 }
8683 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8684
8685 /* add base */
8686 switch (bSib & X86_SIB_BASE_MASK)
8687 {
8688 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8689 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8690 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8691 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8692 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8693 case 5:
8694 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8695 {
8696 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8697 SET_SS_DEF();
8698 }
8699 else
8700 {
8701 uint32_t u32Disp;
8702 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8703 u32EffAddr += u32Disp;
8704 uInfo |= u32Disp;
8705 }
8706 break;
8707 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8708 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8710 }
8711 break;
8712 }
8713 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8714 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8715 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8716 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8717 }
8718
8719 /* Get and add the displacement. */
8720 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8721 {
8722 case 0:
8723 break;
8724 case 1:
8725 {
8726 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8727 u32EffAddr += i8Disp;
8728 uInfo |= (uint32_t)(int32_t)i8Disp;
8729 break;
8730 }
8731 case 2:
8732 {
8733 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8734 u32EffAddr += u32Disp;
8735 uInfo |= (uint32_t)u32Disp;
8736 break;
8737 }
8738 default:
8739 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8740 }
8741
8742 }
8743 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8744 *pGCPtrEff = u32EffAddr;
8745 }
8746 }
8747 else
8748 {
8749 uint64_t u64EffAddr;
8750
8751 /* Handle the rip+disp32 form with no registers first. */
8752 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8753 {
8754 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8755 uInfo = (uint32_t)u64EffAddr;
8756 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8757 }
8758 else
8759 {
8760 /* Get the register (or SIB) value. */
8761 uInfo = 0;
8762 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8763 {
8764 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8765 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8766 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8767 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8768 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8769 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8770 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8771 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8772 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8773 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8774 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8775 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8776 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8777 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8778 /* SIB */
8779 case 4:
8780 case 12:
8781 {
8782 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8783 uInfo = (uint64_t)bSib << 32;
8784
8785 /* Get the index and scale it. */
8786 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8787 {
8788 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8789 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8790 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8791 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8792 case 4: u64EffAddr = 0; /*none */ break;
8793 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8794 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8795 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8796 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8797 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8798 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8799 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8800 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8801 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8802 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8803 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8804 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8805 }
8806 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8807
8808 /* add base */
8809 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8810 {
8811 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8812 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8813 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8814 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8815 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8816 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8817 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8818 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8819 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8820 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8821 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8822 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8823 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8824 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8825 /* complicated encodings */
8826 case 5:
8827 case 13:
8828 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8829 {
8830 if (!pVCpu->iem.s.uRexB)
8831 {
8832 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8833 SET_SS_DEF();
8834 }
8835 else
8836 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8837 }
8838 else
8839 {
8840 uint32_t u32Disp;
8841 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8842 u64EffAddr += (int32_t)u32Disp;
8843 uInfo |= u32Disp;
8844 }
8845 break;
8846 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8847 }
8848 break;
8849 }
8850 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8851 }
8852
8853 /* Get and add the displacement. */
8854 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8855 {
8856 case 0:
8857 break;
8858 case 1:
8859 {
8860 int8_t i8Disp;
8861 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8862 u64EffAddr += i8Disp;
8863 uInfo |= (uint32_t)(int32_t)i8Disp;
8864 break;
8865 }
8866 case 2:
8867 {
8868 uint32_t u32Disp;
8869 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8870 u64EffAddr += (int32_t)u32Disp;
8871 uInfo |= u32Disp;
8872 break;
8873 }
8874 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8875 }
8876
8877 }
8878
8879 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8880 *pGCPtrEff = u64EffAddr;
8881 else
8882 {
8883 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8884 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8885 }
8886 }
8887 *puInfo = uInfo;
8888
8889 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8890 return VINF_SUCCESS;
8891}
8892
8893/** @} */
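/*
 * Worked example for the 64-bit SIB decoding above (illustrative only, the
 * byte values are made up): with mod=01, rm=100, a SIB byte of 0x58
 * (scale=01b, index=011b/RBX, base=000b/RAX, no REX bits) and an 8-bit
 * displacement of 0x10, the code above computes
 *
 *      u64EffAddr = RAX + (RBX << 1) + 0x10
 *
 * so with RAX=0x1000 and RBX=0x200 the effective address is 0x1410.
 */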
8894
8895
8896#ifdef LOG_ENABLED
8897/**
8898 * Logs the current instruction.
8899 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8900 * @param fSameCtx Set if we have the same context information as the VMM,
8901 * clear if we may have already executed an instruction in
8902 * our debug context. When clear, we assume IEMCPU holds
8903 * valid CPU mode info.
8904 *
8905 * The @a fSameCtx parameter is now misleading and obsolete.
8906 * @param pszFunction The IEM function doing the execution.
8907 */
8908static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8909{
8910# ifdef IN_RING3
8911 if (LogIs2Enabled())
8912 {
8913 char szInstr[256];
8914 uint32_t cbInstr = 0;
8915 if (fSameCtx)
8916 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8917 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8918 szInstr, sizeof(szInstr), &cbInstr);
8919 else
8920 {
8921 uint32_t fFlags = 0;
8922 switch (IEM_GET_CPU_MODE(pVCpu))
8923 {
8924 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8925 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8926 case IEMMODE_16BIT:
8927 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8928 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8929 else
8930 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8931 break;
8932 }
8933 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8934 szInstr, sizeof(szInstr), &cbInstr);
8935 }
8936
8937 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8938 Log2(("**** %s fExec=%x\n"
8939 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8940 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8941 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8942 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8943 " %s\n"
8944 , pszFunction, pVCpu->iem.s.fExec,
8945 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8946 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8947 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8948 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8949 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8950 szInstr));
8951
8952 /* This stuff sucks atm. as it fills the log with MSRs. */
8953 //if (LogIs3Enabled())
8954 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8955 }
8956 else
8957# endif
8958 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8959 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8960 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8961}
8962#endif /* LOG_ENABLED */
8963
8964
8965#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8966/**
8967 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8968 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8969 *
8970 * @returns Modified rcStrict.
8971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8972 * @param rcStrict The instruction execution status.
8973 */
8974static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8975{
8976 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8977 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8978 {
8979 /* VMX preemption timer takes priority over NMI-window exits. */
8980 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8981 {
8982 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
8983 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
8984 }
8985 /*
8986 * Check remaining intercepts.
8987 *
8988 * NMI-window and Interrupt-window VM-exits.
8989 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
8990 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
8991 *
8992 * See Intel spec. 26.7.6 "NMI-Window Exiting".
8993 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8994 */
8995 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
8996 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
8997 && !TRPMHasTrap(pVCpu))
8998 {
8999 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9000 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9001 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9002 {
9003 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9004 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9005 }
9006 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9007 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9008 {
9009 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9010 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9011 }
9012 }
9013 }
9014 /* TPR-below threshold/APIC write has the highest priority. */
9015 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9016 {
9017 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9018 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9019 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9020 }
9021 /* MTF takes priority over VMX-preemption timer. */
9022 else
9023 {
9024 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9025 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9026 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9027 }
9028 return rcStrict;
9029}
9030#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9031
9032
9033/**
9034 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9035 * IEMExecOneWithPrefetchedByPC.
9036 *
9037 * Similar code is found in IEMExecLots.
9038 *
9039 * @return Strict VBox status code.
9040 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9041 * @param fExecuteInhibit If set, execute the instruction following CLI,
9042 * POP SS and MOV SS,GR.
9043 * @param pszFunction The calling function name.
9044 */
9045DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9046{
9047 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9048 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9049 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9050 RT_NOREF_PV(pszFunction);
9051
9052#ifdef IEM_WITH_SETJMP
9053 VBOXSTRICTRC rcStrict;
9054 IEM_TRY_SETJMP(pVCpu, rcStrict)
9055 {
9056 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9057 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9058 }
9059 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9060 {
9061 pVCpu->iem.s.cLongJumps++;
9062 }
9063 IEM_CATCH_LONGJMP_END(pVCpu);
9064#else
9065 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9066 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9067#endif
9068 if (rcStrict == VINF_SUCCESS)
9069 pVCpu->iem.s.cInstructions++;
9070 if (pVCpu->iem.s.cActiveMappings > 0)
9071 {
9072 Assert(rcStrict != VINF_SUCCESS);
9073 iemMemRollback(pVCpu);
9074 }
9075 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9076 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9077 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9078
9079//#ifdef DEBUG
9080// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9081//#endif
9082
9083#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9084 /*
9085 * Perform any VMX nested-guest instruction boundary actions.
9086 *
9087 * If any of these causes a VM-exit, we must skip executing the next
9088 * instruction (would run into stale page tables). A VM-exit makes sure
9089 * there is no interrupt-inhibition, so that should ensure we don't end up
9090 * trying to execute the next instruction. Clearing fExecuteInhibit is
9091 * problematic because of the setjmp/longjmp clobbering above.
9092 */
9093 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9094 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9095 || rcStrict != VINF_SUCCESS)
9096 { /* likely */ }
9097 else
9098 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9099#endif
9100
9101 /* Execute the next instruction as well if a cli, pop ss or
9102 mov ss, Gr has just completed successfully. */
9103 if ( fExecuteInhibit
9104 && rcStrict == VINF_SUCCESS
9105 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9106 {
9107 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9108 if (rcStrict == VINF_SUCCESS)
9109 {
9110#ifdef LOG_ENABLED
9111 iemLogCurInstr(pVCpu, false, pszFunction);
9112#endif
9113#ifdef IEM_WITH_SETJMP
9114 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9115 {
9116 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9117 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9118 }
9119 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9120 {
9121 pVCpu->iem.s.cLongJumps++;
9122 }
9123 IEM_CATCH_LONGJMP_END(pVCpu);
9124#else
9125 IEM_OPCODE_GET_FIRST_U8(&b);
9126 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9127#endif
9128 if (rcStrict == VINF_SUCCESS)
9129 {
9130 pVCpu->iem.s.cInstructions++;
9131#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9132 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9133 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9134 { /* likely */ }
9135 else
9136 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9137#endif
9138 }
9139 if (pVCpu->iem.s.cActiveMappings > 0)
9140 {
9141 Assert(rcStrict != VINF_SUCCESS);
9142 iemMemRollback(pVCpu);
9143 }
9144 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9145 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9146 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9147 }
9148 else if (pVCpu->iem.s.cActiveMappings > 0)
9149 iemMemRollback(pVCpu);
9150 /** @todo drop this after we bake this change into RIP advancing. */
9151 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9152 }
9153
9154 /*
9155 * Return value fiddling, statistics and sanity assertions.
9156 */
9157 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9158
9159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9161 return rcStrict;
9162}
9163
9164
9165/**
9166 * Execute one instruction.
9167 *
9168 * @return Strict VBox status code.
9169 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9170 */
9171VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9172{
9173 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9174#ifdef LOG_ENABLED
9175 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9176#endif
9177
9178 /*
9179 * Do the decoding and emulation.
9180 */
9181 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9182 if (rcStrict == VINF_SUCCESS)
9183 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9184 else if (pVCpu->iem.s.cActiveMappings > 0)
9185 iemMemRollback(pVCpu);
9186
9187 if (rcStrict != VINF_SUCCESS)
9188 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9189 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9190 return rcStrict;
9191}
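/*
 * Minimal usage sketch for IEMExecOne (illustrative only, kept in a comment so
 * it is not compiled): a caller running on the EMT hands over the vCPU and
 * acts on the returned strict status code.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // let the caller reschedule or handle the status
 */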
9192
9193
9194VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9195{
9196 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9197 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9198 if (rcStrict == VINF_SUCCESS)
9199 {
9200 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9201 if (pcbWritten)
9202 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9203 }
9204 else if (pVCpu->iem.s.cActiveMappings > 0)
9205 iemMemRollback(pVCpu);
9206
9207 return rcStrict;
9208}
9209
9210
9211VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9212 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9213{
9214 VBOXSTRICTRC rcStrict;
9215 if ( cbOpcodeBytes
9216 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9217 {
9218 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9219#ifdef IEM_WITH_CODE_TLB
9220 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9221 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9222 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9223 pVCpu->iem.s.offCurInstrStart = 0;
9224 pVCpu->iem.s.offInstrNextByte = 0;
9225 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9226#else
9227 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9228 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9229#endif
9230 rcStrict = VINF_SUCCESS;
9231 }
9232 else
9233 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9234 if (rcStrict == VINF_SUCCESS)
9235 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9236 else if (pVCpu->iem.s.cActiveMappings > 0)
9237 iemMemRollback(pVCpu);
9238
9239 return rcStrict;
9240}
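/*
 * Usage sketch (illustrative only, not compiled): a caller that already has
 * the opcode bytes at the current RIP can skip the opcode prefetch.  The
 * abInstr buffer below is hypothetical and assumed to be filled by the caller.
 *
 *      uint8_t      abInstr[16];   // starts at the guest RIP
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           abInstr, sizeof(abInstr));
 */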
9241
9242
9243VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9244{
9245 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9246 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9247 if (rcStrict == VINF_SUCCESS)
9248 {
9249 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9250 if (pcbWritten)
9251 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9252 }
9253 else if (pVCpu->iem.s.cActiveMappings > 0)
9254 iemMemRollback(pVCpu);
9255
9256 return rcStrict;
9257}
9258
9259
9260VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9261 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9262{
9263 VBOXSTRICTRC rcStrict;
9264 if ( cbOpcodeBytes
9265 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9266 {
9267 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9268#ifdef IEM_WITH_CODE_TLB
9269 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9270 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9271 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9272 pVCpu->iem.s.offCurInstrStart = 0;
9273 pVCpu->iem.s.offInstrNextByte = 0;
9274 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9275#else
9276 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9277 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9278#endif
9279 rcStrict = VINF_SUCCESS;
9280 }
9281 else
9282 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9283 if (rcStrict == VINF_SUCCESS)
9284 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9285 else if (pVCpu->iem.s.cActiveMappings > 0)
9286 iemMemRollback(pVCpu);
9287
9288 return rcStrict;
9289}
9290
9291
9292/**
9293 * For handling split cacheline lock operations when the host has split-lock
9294 * detection enabled.
9295 *
9296 * This will cause the interpreter to disregard the lock prefix and implicit
9297 * locking (xchg).
9298 *
9299 * @returns Strict VBox status code.
9300 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9301 */
9302VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9303{
9304 /*
9305 * Do the decoding and emulation.
9306 */
9307 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9308 if (rcStrict == VINF_SUCCESS)
9309 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9310 else if (pVCpu->iem.s.cActiveMappings > 0)
9311 iemMemRollback(pVCpu);
9312
9313 if (rcStrict != VINF_SUCCESS)
9314 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9315 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9316 return rcStrict;
9317}
9318
9319
9320/**
9321 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9322 * inject a pending TRPM trap.
9323 */
9324VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9325{
9326 Assert(TRPMHasTrap(pVCpu));
9327
9328 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9329 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9330 {
9331 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9332#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9333 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9334 if (fIntrEnabled)
9335 {
9336 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9337 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9338 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9339 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9340 else
9341 {
9342 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9343 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9344 }
9345 }
9346#else
9347 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9348#endif
9349 if (fIntrEnabled)
9350 {
9351 uint8_t u8TrapNo;
9352 TRPMEVENT enmType;
9353 uint32_t uErrCode;
9354 RTGCPTR uCr2;
9355 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9356 AssertRC(rc2);
9357 Assert(enmType == TRPM_HARDWARE_INT);
9358 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9359
9360 TRPMResetTrap(pVCpu);
9361
9362#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9363 /* Injecting an event may cause a VM-exit. */
9364 if ( rcStrict != VINF_SUCCESS
9365 && rcStrict != VINF_IEM_RAISED_XCPT)
9366 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9367#else
9368 NOREF(rcStrict);
9369#endif
9370 }
9371 }
9372
9373 return VINF_SUCCESS;
9374}
9375
9376
9377VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9378{
9379 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9380 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9381 Assert(cMaxInstructions > 0);
9382
9383 /*
9384 * See if there is an interrupt pending in TRPM, inject it if we can.
9385 */
9386 /** @todo What if we are injecting an exception and not an interrupt? Is that
9387 * possible here? For now we assert it is indeed only an interrupt. */
9388 if (!TRPMHasTrap(pVCpu))
9389 { /* likely */ }
9390 else
9391 {
9392 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9393 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9394 { /*likely */ }
9395 else
9396 return rcStrict;
9397 }
9398
9399 /*
9400 * Initial decoder init w/ prefetch, then setup setjmp.
9401 */
9402 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9403 if (rcStrict == VINF_SUCCESS)
9404 {
9405#ifdef IEM_WITH_SETJMP
9406 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9407 IEM_TRY_SETJMP(pVCpu, rcStrict)
9408#endif
9409 {
9410 /*
9411 * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
9412 */
9413 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9414 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9415 for (;;)
9416 {
9417 /*
9418 * Log the state.
9419 */
9420#ifdef LOG_ENABLED
9421 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9422#endif
9423
9424 /*
9425 * Do the decoding and emulation.
9426 */
9427 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9428 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9429#ifdef VBOX_STRICT
9430 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9431#endif
9432 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9433 {
9434 Assert(pVCpu->iem.s.cActiveMappings == 0);
9435 pVCpu->iem.s.cInstructions++;
9436
9437#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9438 /* Perform any VMX nested-guest instruction boundary actions. */
9439 uint64_t fCpu = pVCpu->fLocalForcedActions;
9440 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9441 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9442 { /* likely */ }
9443 else
9444 {
9445 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9446 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9447 fCpu = pVCpu->fLocalForcedActions;
9448 else
9449 {
9450 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9451 break;
9452 }
9453 }
9454#endif
9455 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9456 {
9457#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9458 uint64_t fCpu = pVCpu->fLocalForcedActions;
9459#endif
9460 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9461 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9462 | VMCPU_FF_TLB_FLUSH
9463 | VMCPU_FF_UNHALT );
9464
9465 if (RT_LIKELY( ( !fCpu
9466 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9467 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9468 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9469 {
9470 if (--cMaxInstructionsGccStupidity > 0)
9471 {
9472 /* Poll timers every now and then according to the caller's specs. */
9473 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9474 || !TMTimerPollBool(pVM, pVCpu))
9475 {
9476 Assert(pVCpu->iem.s.cActiveMappings == 0);
9477 iemReInitDecoder(pVCpu);
9478 continue;
9479 }
9480 }
9481 }
9482 }
9483 Assert(pVCpu->iem.s.cActiveMappings == 0);
9484 }
9485 else if (pVCpu->iem.s.cActiveMappings > 0)
9486 iemMemRollback(pVCpu);
9487 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9488 break;
9489 }
9490 }
9491#ifdef IEM_WITH_SETJMP
9492 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9493 {
9494 if (pVCpu->iem.s.cActiveMappings > 0)
9495 iemMemRollback(pVCpu);
9496# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9497 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9498# endif
9499 pVCpu->iem.s.cLongJumps++;
9500 }
9501 IEM_CATCH_LONGJMP_END(pVCpu);
9502#endif
9503
9504 /*
9505 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9506 */
9507 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9508 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9509 }
9510 else
9511 {
9512 if (pVCpu->iem.s.cActiveMappings > 0)
9513 iemMemRollback(pVCpu);
9514
9515#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9516 /*
9517 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9518 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9519 */
9520 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9521#endif
9522 }
9523
9524 /*
9525 * Maybe re-enter raw-mode and log.
9526 */
9527 if (rcStrict != VINF_SUCCESS)
9528 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9530 if (pcInstructions)
9531 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9532 return rcStrict;
9533}
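/*
 * Usage sketch (illustrative only, not compiled): cPollRate is used as a bit
 * mask, so it must be a power of two minus one (see the assertion at the top
 * of the function), e.g. 511 to poll timers roughly every 512 instructions.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 */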
9534
9535
9536/**
9537 * Interface used by EMExecuteExec, does exit statistics and limits.
9538 *
9539 * @returns Strict VBox status code.
9540 * @param pVCpu The cross context virtual CPU structure.
9541 * @param fWillExit To be defined.
9542 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9543 * @param cMaxInstructions Maximum number of instructions to execute.
9544 * @param cMaxInstructionsWithoutExits
9545 * The max number of instructions without exits.
9546 * @param pStats Where to return statistics.
9547 */
9548VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9549 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9550{
9551 NOREF(fWillExit); /** @todo define flexible exit crits */
9552
9553 /*
9554 * Initialize return stats.
9555 */
9556 pStats->cInstructions = 0;
9557 pStats->cExits = 0;
9558 pStats->cMaxExitDistance = 0;
9559 pStats->cReserved = 0;
9560
9561 /*
9562 * Initial decoder init w/ prefetch, then setup setjmp.
9563 */
9564 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9565 if (rcStrict == VINF_SUCCESS)
9566 {
9567#ifdef IEM_WITH_SETJMP
9568 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9569 IEM_TRY_SETJMP(pVCpu, rcStrict)
9570#endif
9571 {
9572#ifdef IN_RING0
9573 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9574#endif
9575 uint32_t cInstructionSinceLastExit = 0;
9576
9577 /*
9578 * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
9579 */
9580 PVM pVM = pVCpu->CTX_SUFF(pVM);
9581 for (;;)
9582 {
9583 /*
9584 * Log the state.
9585 */
9586#ifdef LOG_ENABLED
9587 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9588#endif
9589
9590 /*
9591 * Do the decoding and emulation.
9592 */
9593 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9594
9595 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9596 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9597
9598 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9599 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9600 {
9601 pStats->cExits += 1;
9602 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9603 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9604 cInstructionSinceLastExit = 0;
9605 }
9606
9607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9608 {
9609 Assert(pVCpu->iem.s.cActiveMappings == 0);
9610 pVCpu->iem.s.cInstructions++;
9611 pStats->cInstructions++;
9612 cInstructionSinceLastExit++;
9613
9614#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9615 /* Perform any VMX nested-guest instruction boundary actions. */
9616 uint64_t fCpu = pVCpu->fLocalForcedActions;
9617 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9618 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9619 { /* likely */ }
9620 else
9621 {
9622 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9623 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9624 fCpu = pVCpu->fLocalForcedActions;
9625 else
9626 {
9627 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9628 break;
9629 }
9630 }
9631#endif
9632 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9633 {
9634#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9635 uint64_t fCpu = pVCpu->fLocalForcedActions;
9636#endif
9637 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9638 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9639 | VMCPU_FF_TLB_FLUSH
9640 | VMCPU_FF_UNHALT );
9641 if (RT_LIKELY( ( ( !fCpu
9642 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9643 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9644 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9645 || pStats->cInstructions < cMinInstructions))
9646 {
9647 if (pStats->cInstructions < cMaxInstructions)
9648 {
9649 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9650 {
9651#ifdef IN_RING0
9652 if ( !fCheckPreemptionPending
9653 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9654#endif
9655 {
9656 Assert(pVCpu->iem.s.cActiveMappings == 0);
9657 iemReInitDecoder(pVCpu);
9658 continue;
9659 }
9660#ifdef IN_RING0
9661 rcStrict = VINF_EM_RAW_INTERRUPT;
9662 break;
9663#endif
9664 }
9665 }
9666 }
9667 Assert(!(fCpu & VMCPU_FF_IEM));
9668 }
9669 Assert(pVCpu->iem.s.cActiveMappings == 0);
9670 }
9671 else if (pVCpu->iem.s.cActiveMappings > 0)
9672 iemMemRollback(pVCpu);
9673 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9674 break;
9675 }
9676 }
9677#ifdef IEM_WITH_SETJMP
9678 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9679 {
9680 if (pVCpu->iem.s.cActiveMappings > 0)
9681 iemMemRollback(pVCpu);
9682 pVCpu->iem.s.cLongJumps++;
9683 }
9684 IEM_CATCH_LONGJMP_END(pVCpu);
9685#endif
9686
9687 /*
9688 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9689 */
9690 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9691 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9692 }
9693 else
9694 {
9695 if (pVCpu->iem.s.cActiveMappings > 0)
9696 iemMemRollback(pVCpu);
9697
9698#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9699 /*
9700 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9701 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9702 */
9703 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9704#endif
9705 }
9706
9707 /*
9708 * Maybe re-enter raw-mode and log.
9709 */
9710 if (rcStrict != VINF_SUCCESS)
9711 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9712 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9713 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9714 return rcStrict;
9715}
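/*
 * Usage sketch (illustrative only, not compiled; the limit values are made up
 * and IEMEXECFOREXITSTATS is assumed to be the struct behind
 * PIEMEXECFOREXITSTATS):
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      // fWillExit=0, at least 1 instruction, at most 4096, max 2048 without exits
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 2048, &Stats);
 */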
9716
9717
9718/**
9719 * Injects a trap, fault, abort, software interrupt or external interrupt.
9720 *
9721 * The parameter list matches TRPMQueryTrapAll pretty closely.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9725 * @param u8TrapNo The trap number.
9726 * @param enmType What type is it (trap/fault/abort), software
9727 * interrupt or hardware interrupt.
9728 * @param uErrCode The error code if applicable.
9729 * @param uCr2 The CR2 value if applicable.
9730 * @param cbInstr The instruction length (only relevant for
9731 * software interrupts).
9732 */
9733VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9734 uint8_t cbInstr)
9735{
9736 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9737#ifdef DBGFTRACE_ENABLED
9738 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9739 u8TrapNo, enmType, uErrCode, uCr2);
9740#endif
9741
9742 uint32_t fFlags;
9743 switch (enmType)
9744 {
9745 case TRPM_HARDWARE_INT:
9746 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9747 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9748 uErrCode = uCr2 = 0;
9749 break;
9750
9751 case TRPM_SOFTWARE_INT:
9752 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9753 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9754 uErrCode = uCr2 = 0;
9755 break;
9756
9757 case TRPM_TRAP:
9758 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9759 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9760 if (u8TrapNo == X86_XCPT_PF)
9761 fFlags |= IEM_XCPT_FLAGS_CR2;
9762 switch (u8TrapNo)
9763 {
9764 case X86_XCPT_DF:
9765 case X86_XCPT_TS:
9766 case X86_XCPT_NP:
9767 case X86_XCPT_SS:
9768 case X86_XCPT_PF:
9769 case X86_XCPT_AC:
9770 case X86_XCPT_GP:
9771 fFlags |= IEM_XCPT_FLAGS_ERR;
9772 break;
9773 }
9774 break;
9775
9776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9777 }
9778
9779 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9780
9781 if (pVCpu->iem.s.cActiveMappings > 0)
9782 iemMemRollback(pVCpu);
9783
9784 return rcStrict;
9785}
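/*
 * Usage sketch (illustrative only, not compiled): injecting external interrupt
 * vector 0x41; for TRPM_HARDWARE_INT the error code, CR2 and instruction
 * length are ignored, as seen above.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x41, TRPM_HARDWARE_INT, 0, 0, 0);
 */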
9786
9787
9788/**
9789 * Injects the active TRPM event.
9790 *
9791 * @returns Strict VBox status code.
9792 * @param pVCpu The cross context virtual CPU structure.
9793 */
9794VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9795{
9796#ifndef IEM_IMPLEMENTS_TASKSWITCH
9797 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9798#else
9799 uint8_t u8TrapNo;
9800 TRPMEVENT enmType;
9801 uint32_t uErrCode;
9802 RTGCUINTPTR uCr2;
9803 uint8_t cbInstr;
9804 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9805 if (RT_FAILURE(rc))
9806 return rc;
9807
9808 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9809 * ICEBP \#DB injection as a special case. */
9810 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9811#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9812 if (rcStrict == VINF_SVM_VMEXIT)
9813 rcStrict = VINF_SUCCESS;
9814#endif
9815#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9816 if (rcStrict == VINF_VMX_VMEXIT)
9817 rcStrict = VINF_SUCCESS;
9818#endif
9819 /** @todo Are there any other codes that imply the event was successfully
9820 * delivered to the guest? See @bugref{6607}. */
9821 if ( rcStrict == VINF_SUCCESS
9822 || rcStrict == VINF_IEM_RAISED_XCPT)
9823 TRPMResetTrap(pVCpu);
9824
9825 return rcStrict;
9826#endif
9827}
9828
9829
9830VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9831{
9832 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9833 return VERR_NOT_IMPLEMENTED;
9834}
9835
9836
9837VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9838{
9839 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9840 return VERR_NOT_IMPLEMENTED;
9841}
9842
9843
9844/**
9845 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9846 *
9847 * This API ASSUMES that the caller has already verified that the guest code is
9848 * allowed to access the I/O port. (The I/O port is in the DX register in the
9849 * guest state.)
9850 *
9851 * @returns Strict VBox status code.
9852 * @param pVCpu The cross context virtual CPU structure.
9853 * @param cbValue The size of the I/O port access (1, 2, or 4).
9854 * @param enmAddrMode The addressing mode.
9855 * @param fRepPrefix Indicates whether a repeat prefix is used
9856 * (doesn't matter which for this instruction).
9857 * @param cbInstr The instruction length in bytes.
9858 * @param iEffSeg The effective segment address.
9859 * @param fIoChecked Whether the access to the I/O port has been
9860 * checked or not. It's typically checked in the
9861 * HM scenario.
9862 */
9863VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9864 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9865{
9866 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9867 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9868
9869 /*
9870 * State init.
9871 */
9872 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9873
9874 /*
9875 * Switch orgy for getting to the right handler.
9876 */
9877 VBOXSTRICTRC rcStrict;
9878 if (fRepPrefix)
9879 {
9880 switch (enmAddrMode)
9881 {
9882 case IEMMODE_16BIT:
9883 switch (cbValue)
9884 {
9885 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9886 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9887 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9888 default:
9889 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9890 }
9891 break;
9892
9893 case IEMMODE_32BIT:
9894 switch (cbValue)
9895 {
9896 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9897 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9898 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9899 default:
9900 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9901 }
9902 break;
9903
9904 case IEMMODE_64BIT:
9905 switch (cbValue)
9906 {
9907 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9908 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9909 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9910 default:
9911 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9912 }
9913 break;
9914
9915 default:
9916 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9917 }
9918 }
9919 else
9920 {
9921 switch (enmAddrMode)
9922 {
9923 case IEMMODE_16BIT:
9924 switch (cbValue)
9925 {
9926 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9927 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9928 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9929 default:
9930 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9931 }
9932 break;
9933
9934 case IEMMODE_32BIT:
9935 switch (cbValue)
9936 {
9937 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9938 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9939 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9940 default:
9941 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9942 }
9943 break;
9944
9945 case IEMMODE_64BIT:
9946 switch (cbValue)
9947 {
9948 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9949 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9950 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9951 default:
9952 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9953 }
9954 break;
9955
9956 default:
9957 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9958 }
9959 }
9960
9961 if (pVCpu->iem.s.cActiveMappings)
9962 iemMemRollback(pVCpu);
9963
9964 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9965}
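/*
 * Usage sketch (illustrative only, not compiled): an HM I/O exit handler for a
 * "rep outsb" with 32-bit addressing and DS as the effective segment would
 * roughly dispatch here as shown; cbInstr comes from the exit information.
 *
 *      // cbValue=1 (byte), rep prefix, I/O port access already checked by HM
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                   true, cbInstr, X86_SREG_DS, true);
 */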
9966
9967
9968/**
9969 * Interface for HM and EM for executing string I/O IN (read) instructions.
9970 *
9971 * This API ASSUMES that the caller has already verified that the guest code is
9972 * allowed to access the I/O port. (The I/O port is in the DX register in the
9973 * guest state.)
9974 *
9975 * @returns Strict VBox status code.
9976 * @param pVCpu The cross context virtual CPU structure.
9977 * @param cbValue The size of the I/O port access (1, 2, or 4).
9978 * @param enmAddrMode The addressing mode.
9979 * @param fRepPrefix Indicates whether a repeat prefix is used
9980 * (doesn't matter which for this instruction).
9981 * @param cbInstr The instruction length in bytes.
9982 * @param fIoChecked Whether the access to the I/O port has been
9983 * checked or not. It's typically checked in the
9984 * HM scenario.
9985 */
9986VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9987 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
9988{
9989 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9990
9991 /*
9992 * State init.
9993 */
9994 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9995
9996 /*
9997 * Switch orgy for getting to the right handler.
9998 */
9999 VBOXSTRICTRC rcStrict;
10000 if (fRepPrefix)
10001 {
10002 switch (enmAddrMode)
10003 {
10004 case IEMMODE_16BIT:
10005 switch (cbValue)
10006 {
10007 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10008 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10009 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10010 default:
10011 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10012 }
10013 break;
10014
10015 case IEMMODE_32BIT:
10016 switch (cbValue)
10017 {
10018 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10019 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10020 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10021 default:
10022 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10023 }
10024 break;
10025
10026 case IEMMODE_64BIT:
10027 switch (cbValue)
10028 {
10029 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10030 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10031 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10032 default:
10033 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10034 }
10035 break;
10036
10037 default:
10038 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10039 }
10040 }
10041 else
10042 {
10043 switch (enmAddrMode)
10044 {
10045 case IEMMODE_16BIT:
10046 switch (cbValue)
10047 {
10048 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10049 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10050 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10051 default:
10052 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10053 }
10054 break;
10055
10056 case IEMMODE_32BIT:
10057 switch (cbValue)
10058 {
10059 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10060 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10061 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10062 default:
10063 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10064 }
10065 break;
10066
10067 case IEMMODE_64BIT:
10068 switch (cbValue)
10069 {
10070 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10071 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10072 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10073 default:
10074 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10075 }
10076 break;
10077
10078 default:
10079 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10080 }
10081 }
10082
10083 if ( pVCpu->iem.s.cActiveMappings == 0
10084 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10085 { /* likely */ }
10086 else
10087 {
10088 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10089 iemMemRollback(pVCpu);
10090 }
10091 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10092}
10093
10094
10095/**
10096 * Interface for rawmode to execute an OUT (write) instruction.
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure.
10100 * @param cbInstr The instruction length in bytes.
10101 * @param u16Port The port to write to.
10102 * @param fImm Whether the port is specified using an immediate operand or
10103 * using the implicit DX register.
10104 * @param cbReg The register size.
10105 *
10106 * @remarks In ring-0 not all of the state needs to be synced in.
10107 */
10108VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10109{
10110 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10111 Assert(cbReg <= 4 && cbReg != 3);
10112
10113 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10114 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10115 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10116 Assert(!pVCpu->iem.s.cActiveMappings);
10117 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10118}
10119
10120
10121/**
10122 * Interface for rawmode to execute an IN (read) instruction.
10123 *
10124 * @returns Strict VBox status code.
10125 * @param pVCpu The cross context virtual CPU structure.
10126 * @param cbInstr The instruction length in bytes.
10127 * @param u16Port The port to read.
10128 * @param fImm Whether the port is specified using an immediate operand or
10129 * using the implicit DX.
10130 * @param cbReg The register size.
10131 */
10132VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10133{
10134 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10135 Assert(cbReg <= 4 && cbReg != 3);
10136
10137 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10138 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10139 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10140 Assert(!pVCpu->iem.s.cActiveMappings);
10141 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10142}
10143
10144
10145/**
10146 * Interface for HM and EM to write to a CRx register.
10147 *
10148 * @returns Strict VBox status code.
10149 * @param pVCpu The cross context virtual CPU structure.
10150 * @param cbInstr The instruction length in bytes.
10151 * @param iCrReg The control register number (destination).
10152 * @param iGReg The general purpose register number (source).
10153 *
10154 * @remarks In ring-0 not all of the state needs to be synced in.
10155 */
10156VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10157{
10158 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10159 Assert(iCrReg < 16);
10160 Assert(iGReg < 16);
10161
10162 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10163 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10164 Assert(!pVCpu->iem.s.cActiveMappings);
10165 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10166}
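/*
 * Usage sketch (illustrative only, not compiled): an HM exit handler for a
 * "mov cr3, rax" intercept (a 3 byte instruction) could emulate it like this,
 * assuming X86_GREG_xAX is the usual general register index for RAX:
 *
 *      // cbInstr=3, iCrReg=3 (CR3), iGReg=X86_GREG_xAX (RAX)
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 */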
10167
10168
10169/**
10170 * Interface for HM and EM to read from a CRx register.
10171 *
10172 * @returns Strict VBox status code.
10173 * @param pVCpu The cross context virtual CPU structure.
10174 * @param cbInstr The instruction length in bytes.
10175 * @param iGReg The general purpose register number (destination).
10176 * @param iCrReg The control register number (source).
10177 *
10178 * @remarks In ring-0 not all of the state needs to be synced in.
10179 */
10180VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10181{
10182 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10183 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10184 | CPUMCTX_EXTRN_APIC_TPR);
10185 Assert(iCrReg < 16);
10186 Assert(iGReg < 16);
10187
10188 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10189 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10190 Assert(!pVCpu->iem.s.cActiveMappings);
10191 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10192}
10193
10194
10195/**
10196 * Interface for HM and EM to write to a DRx register.
10197 *
10198 * @returns Strict VBox status code.
10199 * @param pVCpu The cross context virtual CPU structure.
10200 * @param cbInstr The instruction length in bytes.
10201 * @param iDrReg The debug register number (destination).
10202 * @param iGReg The general purpose register number (source).
10203 *
10204 * @remarks In ring-0 not all of the state needs to be synced in.
10205 */
10206VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10207{
10208 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10209 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10210 Assert(iDrReg < 8);
10211 Assert(iGReg < 16);
10212
10213 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10214 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10215 Assert(!pVCpu->iem.s.cActiveMappings);
10216 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10217}
10218
10219
10220/**
10221 * Interface for HM and EM to read from a DRx register.
10222 *
10223 * @returns Strict VBox status code.
10224 * @param pVCpu The cross context virtual CPU structure.
10225 * @param cbInstr The instruction length in bytes.
10226 * @param iGReg The general purpose register number (destination).
10227 * @param iDrReg The debug register number (source).
10228 *
10229 * @remarks In ring-0 not all of the state needs to be synced in.
10230 */
10231VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10232{
10233 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10234 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10235 Assert(iDrReg < 8);
10236 Assert(iGReg < 16);
10237
10238 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10239 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10240 Assert(!pVCpu->iem.s.cActiveMappings);
10241 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10242}
10243
10244
10245/**
10246 * Interface for HM and EM to clear the CR0[TS] bit.
10247 *
10248 * @returns Strict VBox status code.
10249 * @param pVCpu The cross context virtual CPU structure.
10250 * @param cbInstr The instruction length in bytes.
10251 *
10252 * @remarks In ring-0 not all of the state needs to be synced in.
10253 */
10254VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10255{
10256 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10257
10258 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10260 Assert(!pVCpu->iem.s.cActiveMappings);
10261 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10262}
10263
10264
10265/**
10266 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10267 *
10268 * @returns Strict VBox status code.
10269 * @param pVCpu The cross context virtual CPU structure.
10270 * @param cbInstr The instruction length in bytes.
10271 * @param uValue The value to load into CR0.
10272 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10273 * memory operand. Otherwise pass NIL_RTGCPTR.
10274 *
10275 * @remarks In ring-0 not all of the state needs to be synced in.
10276 */
10277VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10278{
10279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10280
10281 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10282 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10283 Assert(!pVCpu->iem.s.cActiveMappings);
10284 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10285}
10286
10287
10288/**
10289 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10290 *
10291 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10292 *
10293 * @returns Strict VBox status code.
10294 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10295 * @param cbInstr The instruction length in bytes.
10296 * @remarks In ring-0 not all of the state needs to be synced in.
10297 * @thread EMT(pVCpu)
10298 */
10299VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10300{
10301 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10302
10303 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10304 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10305 Assert(!pVCpu->iem.s.cActiveMappings);
10306 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10307}
10308
10309
10310/**
10311 * Interface for HM and EM to emulate the WBINVD instruction.
10312 *
10313 * @returns Strict VBox status code.
10314 * @param pVCpu The cross context virtual CPU structure.
10315 * @param cbInstr The instruction length in bytes.
10316 *
10317 * @remarks In ring-0 not all of the state needs to be synced in.
10318 */
10319VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10320{
10321 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10322
10323 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10324 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10325 Assert(!pVCpu->iem.s.cActiveMappings);
10326 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10327}
10328
10329
10330/**
10331 * Interface for HM and EM to emulate the INVD instruction.
10332 *
10333 * @returns Strict VBox status code.
10334 * @param pVCpu The cross context virtual CPU structure.
10335 * @param cbInstr The instruction length in bytes.
10336 *
10337 * @remarks In ring-0 not all of the state needs to be synced in.
10338 */
10339VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10340{
10341 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10342
10343 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10345 Assert(!pVCpu->iem.s.cActiveMappings);
10346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10347}
10348
10349
10350/**
10351 * Interface for HM and EM to emulate the INVLPG instruction.
10352 *
10353 * @returns Strict VBox status code.
10354 * @retval VINF_PGM_SYNC_CR3
10355 *
10356 * @param pVCpu The cross context virtual CPU structure.
10357 * @param cbInstr The instruction length in bytes.
10358 * @param GCPtrPage The effective address of the page to invalidate.
10359 *
10360 * @remarks In ring-0 not all of the state needs to be synced in.
10361 */
10362VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10363{
10364 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10365
10366 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10367 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10368 Assert(!pVCpu->iem.s.cActiveMappings);
10369 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10370}
10371
10372
10373/**
10374 * Interface for HM and EM to emulate the INVPCID instruction.
10375 *
10376 * @returns Strict VBox status code.
10377 * @retval VINF_PGM_SYNC_CR3
10378 *
10379 * @param pVCpu The cross context virtual CPU structure.
10380 * @param cbInstr The instruction length in bytes.
10381 * @param iEffSeg The effective segment register.
10382 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10383 * @param uType The invalidation type.
10384 *
10385 * @remarks In ring-0 not all of the state needs to be synced in.
10386 */
10387VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10388 uint64_t uType)
10389{
10390 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10391
10392 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10394 Assert(!pVCpu->iem.s.cActiveMappings);
10395 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10396}
10397
10398
10399/**
10400 * Interface for HM and EM to emulate the CPUID instruction.
10401 *
10402 * @returns Strict VBox status code.
10403 *
10404 * @param pVCpu The cross context virtual CPU structure.
10405 * @param cbInstr The instruction length in bytes.
10406 *
10407 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
10408 */
10409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10410{
10411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10412 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10413
10414 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10415 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10416 Assert(!pVCpu->iem.s.cActiveMappings);
10417 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10418}
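/*
 * Hedged note (not from the original source): the IEM_CTX_ASSERT above merely
 * documents which CPUMCTX_EXTRN_XXX bits the caller must already have imported
 * into the CPUM context before calling.  A hypothetical ring-0 caller could
 * look like the sketch below; the import helper name is an assumption, not a
 * confirmed API.
 *
 * @code
 *  // Make sure RAX and RCX (the CPUID leaf/sub-leaf inputs) are valid in CPUMCTX.
 *  int rc = myImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
 *                                   | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
 *  AssertRCReturn(rc, rc);
 *  VBOXSTRICTRC rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
 * @endcode
 */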
10419
10420
10421/**
10422 * Interface for HM and EM to emulate the RDPMC instruction.
10423 *
10424 * @returns Strict VBox status code.
10425 *
10426 * @param pVCpu The cross context virtual CPU structure.
10427 * @param cbInstr The instruction length in bytes.
10428 *
10429 * @remarks Not all of the state needs to be synced in.
10430 */
10431VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10432{
10433 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10434 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10435
10436 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10437 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10438 Assert(!pVCpu->iem.s.cActiveMappings);
10439 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10440}
10441
10442
10443/**
10444 * Interface for HM and EM to emulate the RDTSC instruction.
10445 *
10446 * @returns Strict VBox status code.
10447 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10448 *
10449 * @param pVCpu The cross context virtual CPU structure.
10450 * @param cbInstr The instruction length in bytes.
10451 *
10452 * @remarks Not all of the state needs to be synced in.
10453 */
10454VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10455{
10456 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10457 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10458
10459 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10460 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10461 Assert(!pVCpu->iem.s.cActiveMappings);
10462 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10463}
10464
10465
10466/**
10467 * Interface for HM and EM to emulate the RDTSCP instruction.
10468 *
10469 * @returns Strict VBox status code.
10470 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10471 *
10472 * @param pVCpu The cross context virtual CPU structure.
10473 * @param cbInstr The instruction length in bytes.
10474 *
10475 * @remarks Not all of the state needs to be synced in. Recommended
10476 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10477 */
10478VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10479{
10480 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10481 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10482
10483 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10484 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10485 Assert(!pVCpu->iem.s.cActiveMappings);
10486 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10487}
10488
10489
10490/**
10491 * Interface for HM and EM to emulate the RDMSR instruction.
10492 *
10493 * @returns Strict VBox status code.
10494 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10495 *
10496 * @param pVCpu The cross context virtual CPU structure.
10497 * @param cbInstr The instruction length in bytes.
10498 *
10499 * @remarks Not all of the state needs to be synced in. Requires RCX and
10500 * (currently) all MSRs.
10501 */
10502VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10503{
10504 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10505 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10506
10507 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10508 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10509 Assert(!pVCpu->iem.s.cActiveMappings);
10510 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10511}
10512
10513
10514/**
10515 * Interface for HM and EM to emulate the WRMSR instruction.
10516 *
10517 * @returns Strict VBox status code.
10518 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10519 *
10520 * @param pVCpu The cross context virtual CPU structure.
10521 * @param cbInstr The instruction length in bytes.
10522 *
10523 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10524 * and (currently) all MSRs.
10525 */
10526VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10527{
10528 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10529 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10530 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10531
10532 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10533 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10534 Assert(!pVCpu->iem.s.cActiveMappings);
10535 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10536}
10537
10538
10539/**
10540 * Interface for HM and EM to emulate the MONITOR instruction.
10541 *
10542 * @returns Strict VBox status code.
10543 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10544 *
10545 * @param pVCpu The cross context virtual CPU structure.
10546 * @param cbInstr The instruction length in bytes.
10547 *
10548 * @remarks Not all of the state needs to be synced in.
10549 * @remarks ASSUMES the default DS segment is used and that no segment override
10550 * prefixes are present.
10551 */
10552VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10553{
10554 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10555 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10556
10557 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10558 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10559 Assert(!pVCpu->iem.s.cActiveMappings);
10560 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10561}
10562
10563
10564/**
10565 * Interface for HM and EM to emulate the MWAIT instruction.
10566 *
10567 * @returns Strict VBox status code.
10568 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10569 *
10570 * @param pVCpu The cross context virtual CPU structure.
10571 * @param cbInstr The instruction length in bytes.
10572 *
10573 * @remarks Not all of the state needs to be synced in.
10574 */
10575VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10576{
10577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10578 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10579
10580 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10581 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10582 Assert(!pVCpu->iem.s.cActiveMappings);
10583 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10584}
10585
10586
10587/**
10588 * Interface for HM and EM to emulate the HLT instruction.
10589 *
10590 * @returns Strict VBox status code.
10591 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10592 *
10593 * @param pVCpu The cross context virtual CPU structure.
10594 * @param cbInstr The instruction length in bytes.
10595 *
10596 * @remarks Not all of the state needs to be synced in.
10597 */
10598VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10599{
10600 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10601
10602 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10603 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10604 Assert(!pVCpu->iem.s.cActiveMappings);
10605 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10606}
10607
10608
10609/**
10610 * Checks if IEM is in the process of delivering an event (interrupt or
10611 * exception).
10612 *
10613 * @returns true if we're in the process of raising an interrupt or exception,
10614 * false otherwise.
10615 * @param pVCpu The cross context virtual CPU structure.
10616 * @param puVector Where to store the vector associated with the
10617 * currently delivered event, optional.
10618 * @param pfFlags Where to store the event delivery flags (see
10619 * IEM_XCPT_FLAGS_XXX), optional.
10620 * @param puErr Where to store the error code associated with the
10621 * event, optional.
10622 * @param puCr2 Where to store the CR2 associated with the event,
10623 * optional.
10624 * @remarks The caller should check the flags to determine if the error code and
10625 * CR2 are valid for the event.
10626 */
10627VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10628{
10629 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10630 if (fRaisingXcpt)
10631 {
10632 if (puVector)
10633 *puVector = pVCpu->iem.s.uCurXcpt;
10634 if (pfFlags)
10635 *pfFlags = pVCpu->iem.s.fCurXcpt;
10636 if (puErr)
10637 *puErr = pVCpu->iem.s.uCurXcptErr;
10638 if (puCr2)
10639 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10640 }
10641 return fRaisingXcpt;
10642}
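/*
 * Hedged usage sketch (illustration only): querying the in-flight event, e.g.
 * when building VM-exit interruption information.  The IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 names follow the IEM_XCPT_FLAGS_XXX set referenced in the
 * doc comment above; treat the exact flag names as assumptions here.
 *
 * @code
 *  uint8_t  uVector = 0;
 *  uint32_t fFlags  = 0;
 *  uint32_t uErr    = 0;
 *  uint64_t uCr2    = 0;
 *  if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *  {
 *      Log(("Delivering vector %#x (fFlags=%#x)\n", uVector, fFlags));
 *      if (fFlags & IEM_XCPT_FLAGS_ERR)    // error code is only valid when flagged
 *          Log(("  uErr=%#x\n", uErr));
 *      if (fFlags & IEM_XCPT_FLAGS_CR2)    // CR2 is only valid for page-fault style events
 *          Log(("  uCr2=%#RX64\n", uCr2));
 *  }
 * @endcode
 */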
10643
10644#ifdef IN_RING3
10645
10646/**
10647 * Handles the unlikely and probably fatal merge cases.
10648 *
10649 * @returns Merged status code.
10650 * @param rcStrict Current EM status code.
10651 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10652 * with @a rcStrict.
10653 * @param iMemMap The memory mapping index. For error reporting only.
10654 * @param pVCpu The cross context virtual CPU structure of the calling
10655 * thread, for error reporting only.
10656 */
10657DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10658 unsigned iMemMap, PVMCPUCC pVCpu)
10659{
10660 if (RT_FAILURE_NP(rcStrict))
10661 return rcStrict;
10662
10663 if (RT_FAILURE_NP(rcStrictCommit))
10664 return rcStrictCommit;
10665
10666 if (rcStrict == rcStrictCommit)
10667 return rcStrictCommit;
10668
10669 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10670 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10671 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10673 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10674 return VERR_IOM_FF_STATUS_IPE;
10675}
10676
10677
10678/**
10679 * Helper for IOMR3ProcessForceFlag.
10680 *
10681 * @returns Merged status code.
10682 * @param rcStrict Current EM status code.
10683 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10684 * with @a rcStrict.
10685 * @param iMemMap The memory mapping index. For error reporting only.
10686 * @param pVCpu The cross context virtual CPU structure of the calling
10687 * thread, for error reporting only.
10688 */
10689DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10690{
10691 /* Simple. */
10692 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10693 return rcStrictCommit;
10694
10695 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10696 return rcStrict;
10697
10698 /* EM scheduling status codes. */
10699 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10700 && rcStrict <= VINF_EM_LAST))
10701 {
10702 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10703 && rcStrictCommit <= VINF_EM_LAST))
10704 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10705 }
10706
10707 /* Unlikely */
10708 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10709}
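/*
 * Worked merge examples (informational comment added here, not in the original
 * source), assuming the usual VBox convention that numerically lower
 * VINF_EM_XXX codes carry higher scheduling priority:
 *  - rcStrict = VINF_SUCCESS,      rcStrictCommit = VINF_EM_RESCHEDULE  -> VINF_EM_RESCHEDULE
 *  - rcStrict = VINF_EM_RAW_TO_R3, rcStrictCommit = VINF_SUCCESS        -> VINF_SUCCESS
 *  - both are VINF_EM_XXX scheduling codes                              -> the numerically smaller (higher priority) one
 *  - failures, identical codes or anything else                         -> iemR3MergeStatusSlow (failures win, identical
 *                                                                          codes pass through, the rest is flagged as an IPE)
 */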
10710
10711
10712/**
10713 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10714 *
10715 * @returns Merge between @a rcStrict and what the commit operation returned.
10716 * @param pVM The cross context VM structure.
10717 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10718 * @param rcStrict The status code returned by ring-0 or raw-mode.
10719 */
10720VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10721{
10722 /*
10723 * Reset the pending commit.
10724 */
10725 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10726 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10727 ("%#x %#x %#x\n",
10728 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10729 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10730
10731 /*
10732 * Commit the pending bounce buffers (usually just one).
10733 */
10734 unsigned cBufs = 0;
10735 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10736 while (iMemMap-- > 0)
10737 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10738 {
10739 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10740 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10741 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10742
10743 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10744 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10745 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10746
10747 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10748 {
10749 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10750 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10751 pbBuf,
10752 cbFirst,
10753 PGMACCESSORIGIN_IEM);
10754 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10755 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10756 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10757 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10758 }
10759
10760 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10761 {
10762 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10763 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10764 pbBuf + cbFirst,
10765 cbSecond,
10766 PGMACCESSORIGIN_IEM);
10767 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10768 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10769 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10770 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10771 }
10772 cBufs++;
10773 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10774 }
10775
10776 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10777 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10778 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10779 pVCpu->iem.s.cActiveMappings = 0;
10780 return rcStrict;
10781}
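/*
 * Hedged usage sketch (illustration only): the kind of check a ring-3
 * forced-action loop could perform before resuming guest execution.  The
 * surrounding logic is hypothetical; only VMCPU_FF_IS_SET, VMCPU_FF_IEM and
 * IEMR3ProcessForceFlag are taken from the code above / the existing API.
 *
 * @code
 *  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      // Commit the pending bounce-buffer writes and merge the commit status
 *      // with the status we already have from ring-0.
 *      rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */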
10782
10783#endif /* IN_RING3 */
10784