VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@102424

Last change on this file since 102424 was 102424, checked in by vboxsync, 12 months ago

VMM/IEM: Continue refactoring IEM_MC_MEM_MAP into type specific MCs using bUnmapInfo. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 447.0 KB
1/* $Id: IEMAll.cpp 102424 2023-12-01 22:43:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there until we
63 * leave IEM, because the calling conventions have declared an all-year open
64 * season on much of the FPU state. For instance, an innocent-looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
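/* Example (illustrative only; the mnemonic, format string and VBOX_LOG value below
 * are made up): statements in this file log under the IEM group because of the
 * LOG_GROUP define further down, so a level-4 decode line per the assignments
 * above could look roughly like:
 *
 *     Log4(("decode: %04x:%08RX64 xor eax, eax\n",
 *           pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 *
 * typically enabled at runtime with something like VBOX_LOG="iem.e.l4".
 */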
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
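/* Note (summary for clarity): this benign/contributory/page-fault classification
   feeds the architectural double fault rules: roughly, a contributory exception
   raised while delivering another contributory one, or a page fault raised while
   delivering a page fault or contributory exception, escalates to #DF.  See the
   Intel/AMD manuals for the authoritative tables. */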
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
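/* Example (illustrative): with DR7.L0 set and R/W0 = 01b (write-only, X86_DR7_RW_WO),
   the macro above adds IEM_F_PENDING_BRK_DATA to fExec; R/W0 = 00b (instruction
   fetch, X86_DR7_RW_EO) would add IEM_F_PENDING_BRK_INSTR instead. */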
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
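 /* Note: in 64-bit mode the default operand size is 32 bits (64-bit operand size
    requires REX.W or one of the few instructions defaulting to 64-bit), while the
    default address size is 64 bits; hence the fixed IEMMODE_32BIT values above. */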
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetch opcodes the first time when starting executing.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
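 /* Example (illustrative numbers): with CS.u64Base = 0x10000 and EIP = 0x0123 this
    yields GCPtrPC = 0x10123; cbToTryRead is then further capped below to what is
    left on the 4 KiB page and to the size of the opcode buffer. */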
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
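/* Note on the scheme (summary; pTlb and GCPtrTag are placeholder names): an entry
 * only counts as a hit when its uTag carries the current revision, i.e. lookups
 * do roughly
 *
 *     if (pTlb->aEntries[idx].uTag == (GCPtrTag | pTlb->uTlbRevision))
 *         ...hit...
 *
 * so bumping uTlbRevision invalidates every entry in O(1); only when the revision
 * field wraps around do the tags have to be cleared explicitly, as in the loops
 * above.  IEMTlbInvalidatePage below shows the real lookup expression. */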
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs in a slow fashion following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
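/* Note (summary): the physical revision works the same way; before trusting the
 * cached physical info, consumers such as iemOpcodeFetchBytesJmp compare the
 * IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev against uTlbPhysRev, roughly:
 *
 *     if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *         ...GCPhys and pbMappingR3 may still be used...
 */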
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller == pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
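 /* Compare-exchange (rather than a plain store) presumably avoids clobbering an
    update the target EMT may be doing to its own TLBs concurrently; if the compare
    fails, the revision has been bumped by other means and nothing needs doing. */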
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.pbInstrBuf = NULL;
819 pVCpu->iem.s.cbInstrBufTotal = 0;
820 RT_NOREF(cbInstr);
821#else
822 RT_NOREF(pVCpu, cbInstr);
823#endif
824}
825
826
827
828#ifdef IEM_WITH_CODE_TLB
829
830/**
831 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
832 * failure and jumps.
833 *
834 * We end up here for a number of reasons:
835 * - pbInstrBuf isn't yet initialized.
836 * - Advancing beyond the buffer boundary (e.g. cross page).
837 * - Advancing beyond the CS segment limit.
838 * - Fetching from non-mappable page (e.g. MMIO).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
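 /* Buffer bookkeeping (summary derived from the code below): pbInstrBuf maps one
    guest page (uInstrBufPc is its guest linear address, GCPhysInstrBuf its physical
    address), offCurInstrStart is where the current instruction starts within it,
    offInstrNextByte is the next byte to fetch, cbInstrBuf is how far the current
    instruction may fetch (15 byte limit) and cbInstrBufTotal is the total number
    of valid bytes for the page. */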
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 if (pVCpu->iem.s.pbInstrBuf != NULL)
861 {
862 if (offBuf < pVCpu->iem.s.cbInstrBuf)
863 {
864 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
865 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
866 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
867
868 cbDst -= cbCopy;
869 pvDst = (uint8_t *)pvDst + cbCopy;
870 offBuf += cbCopy;
871 pVCpu->iem.s.offInstrNextByte = offBuf;
872 }
873 }
874
875 /*
876 * Check segment limit, figuring how much we're allowed to access at this point.
877 *
878 * We will fault immediately if RIP is past the segment limit / in non-canonical
879 * territory. If we do continue, there are one or more bytes to read before we
880 * end up in trouble and we need to do that first before faulting.
881 */
882 RTGCPTR GCPtrFirst;
883 uint32_t cbMaxRead;
884 if (IEM_IS_64BIT_CODE(pVCpu))
885 {
886 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
887 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
888 { /* likely */ }
889 else
890 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
891 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
892 }
893 else
894 {
895 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
896 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
897 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
898 { /* likely */ }
899 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
900 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
901 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
902 if (cbMaxRead != 0)
903 { /* likely */ }
904 else
905 {
906 /* Overflowed because address is 0 and limit is max. */
907 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
908 cbMaxRead = X86_PAGE_SIZE;
909 }
910 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
911 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
912 if (cbMaxRead2 < cbMaxRead)
913 cbMaxRead = cbMaxRead2;
914 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
915 }
916
917 /*
918 * Get the TLB entry for this piece of code.
919 */
920 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
921 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
922 if (pTlbe->uTag == uTag)
923 {
924 /* likely when executing lots of code, otherwise unlikely */
925# ifdef VBOX_WITH_STATISTICS
926 pVCpu->iem.s.CodeTlb.cTlbHits++;
927# endif
928 }
929 else
930 {
931 pVCpu->iem.s.CodeTlb.cTlbMisses++;
932 PGMPTWALK Walk;
933 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
934 if (RT_FAILURE(rc))
935 {
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
937 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
938 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
939#endif
940 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
941 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
942 }
943
944 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
945 Assert(Walk.fSucceeded);
946 pTlbe->uTag = uTag;
947 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
948 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
949 pTlbe->GCPhys = Walk.GCPhys;
950 pTlbe->pbMappingR3 = NULL;
951 }
952
953 /*
954 * Check TLB page table level access flags.
955 */
956 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
957 {
958 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
959 {
960 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
961 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
962 }
963 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
964 {
965 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
966 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
967 }
968 }
969
970 /*
971 * Look up the physical page info if necessary.
972 */
973 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
974 { /* not necessary */ }
975 else
976 {
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
980 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
981 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
982 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
983 { /* likely */ }
984 else
985 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
986 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
987 | IEMTLBE_F_NO_MAPPINGR3
988 | IEMTLBE_F_PG_NO_READ
989 | IEMTLBE_F_PG_NO_WRITE
990 | IEMTLBE_F_PG_UNASSIGNED
991 | IEMTLBE_F_PG_CODE_PAGE);
992 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
993 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
994 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
995 }
996
997# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
998 /*
999 * Try do a direct read using the pbMappingR3 pointer.
1000 */
1001 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1002 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1003 {
1004 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1005 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1006 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1007 {
1008 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1009 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1010 }
1011 else
1012 {
1013 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1014 if (cbInstr + (uint32_t)cbDst <= 15)
1015 {
1016 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1017 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1018 }
1019 else
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1023 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1024 }
1025 }
1026 if (cbDst <= cbMaxRead)
1027 {
1028 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1029 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1030
1031 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1032 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1033 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1034 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1035 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1036 return;
1037 }
1038 pVCpu->iem.s.pbInstrBuf = NULL;
1039
1040 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1041 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1042 }
1043# else
1044# error "refactor as needed"
1045 /*
1046 * If there is no special read handling, we can read a bit more and
1047 * put it in the prefetch buffer.
1048 */
1049 if ( cbDst < cbMaxRead
1050 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1053 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1062 }
1063 else
1064 {
1065 Log((RT_SUCCESS(rcStrict)
1066 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1067 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1068 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1069 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1070 }
1071 }
1072# endif
1073 /*
1074 * Special read handling, so only read exactly what's needed.
1075 * This is a highly unlikely scenario.
1076 */
1077 else
1078 {
1079 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1080
1081 /* Check instruction length. */
1082 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1083 if (RT_LIKELY(cbInstr + cbDst <= 15))
1084 { /* likely */ }
1085 else
1086 {
1087 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1089 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1090 }
1091
1092 /* Do the reading. */
1093 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1094 if (cbToRead > 0)
1095 {
1096 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1097 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1099 { /* likely */ }
1100 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1101 {
1102 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1103 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1105 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1106 }
1107 else
1108 {
1109 Log((RT_SUCCESS(rcStrict)
1110 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1111 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1112 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1113 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1114 }
1115 }
1116
1117 /* Update the state and probably return. */
1118 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1119 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1120 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1121
1122 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1123 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1124 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1125 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1126 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1127 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1128 pVCpu->iem.s.pbInstrBuf = NULL;
1129 if (cbToRead == cbDst)
1130 return;
1131 }
1132
1133 /*
1134 * More to read, loop.
1135 */
1136 cbDst -= cbMaxRead;
1137 pvDst = (uint8_t *)pvDst + cbMaxRead;
1138 }
1139# else /* !IN_RING3 */
1140 RT_NOREF(pvDst, cbDst);
1141 if (pvDst || cbDst)
1142 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1143# endif /* !IN_RING3 */
1144}
1145
1146#else /* !IEM_WITH_CODE_TLB */
1147
1148/**
1149 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1150 * exception if it fails.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param cbMin The minimum number of bytes relative to offOpcode
1156 * that must be read.
1157 */
1158VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1159{
1160 /*
1161 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1162 *
1163 * First translate CS:rIP to a physical address.
1164 */
1165 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1166 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1167 uint8_t const cbLeft = cbOpcode - offOpcode;
1168 Assert(cbLeft < cbMin);
1169 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1170
1171 uint32_t cbToTryRead;
1172 RTGCPTR GCPtrNext;
1173 if (IEM_IS_64BIT_CODE(pVCpu))
1174 {
1175 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1176 if (!IEM_IS_CANONICAL(GCPtrNext))
1177 return iemRaiseGeneralProtectionFault0(pVCpu);
1178 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1179 }
1180 else
1181 {
1182 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1183 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1184 GCPtrNext32 += cbOpcode;
1185 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1186 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1187 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1188 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1189 if (!cbToTryRead) /* overflowed */
1190 {
1191 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1192 cbToTryRead = UINT32_MAX;
1193 /** @todo check out wrapping around the code segment. */
1194 }
1195 if (cbToTryRead < cbMin - cbLeft)
1196 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1197 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1198
1199 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1200 if (cbToTryRead > cbLeftOnPage)
1201 cbToTryRead = cbLeftOnPage;
1202 }
1203
1204 /* Restrict to opcode buffer space.
1205
1206 We're making ASSUMPTIONS here based on work done previously in
1207 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1208 be fetched in case of an instruction crossing two pages. */
1209 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1210 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1211 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1212 { /* likely */ }
1213 else
1214 {
1215 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1216 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1217 return iemRaiseGeneralProtectionFault0(pVCpu);
1218 }
1219
1220 PGMPTWALK Walk;
1221 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1222 if (RT_FAILURE(rc))
1223 {
1224 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1225#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1226 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1227 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1228#endif
1229 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1230 }
1231 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1232 {
1233 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1234#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1235 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1236 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1237#endif
1238 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1239 }
1240 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1244 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1245 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1246#endif
1247 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1248 }
1249 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1250 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255 /*
1256 * Read the bytes at this address.
1257 *
1258 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1259 * and since PATM should only patch the start of an instruction there
1260 * should be no need to check again here.
1261 */
1262 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1263 {
1264 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1265 cbToTryRead, PGMACCESSORIGIN_IEM);
1266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1267 { /* likely */ }
1268 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1269 {
1270 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1271 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1272 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1273 }
1274 else
1275 {
1276 Log((RT_SUCCESS(rcStrict)
1277 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1278 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1279 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1280 return rcStrict;
1281 }
1282 }
1283 else
1284 {
1285 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1286 if (RT_SUCCESS(rc))
1287 { /* likely */ }
1288 else
1289 {
1290 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1291 return rc;
1292 }
1293 }
1294 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1295 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1296
1297 return VINF_SUCCESS;
1298}
1299
1300#endif /* !IEM_WITH_CODE_TLB */
1301#ifndef IEM_WITH_SETJMP
1302
1303/**
1304 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1305 *
1306 * @returns Strict VBox status code.
1307 * @param pVCpu The cross context virtual CPU structure of the
1308 * calling thread.
1309 * @param pb Where to return the opcode byte.
1310 */
1311VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1312{
1313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1314 if (rcStrict == VINF_SUCCESS)
1315 {
1316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1317 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1318 pVCpu->iem.s.offOpcode = offOpcode + 1;
1319 }
1320 else
1321 *pb = 0;
1322 return rcStrict;
1323}
1324
1325#else /* IEM_WITH_SETJMP */
1326
1327/**
1328 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1329 *
1330 * @returns The opcode byte.
1331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1332 */
1333uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1334{
1335# ifdef IEM_WITH_CODE_TLB
1336 uint8_t u8;
1337 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1338 return u8;
1339# else
1340 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1341 if (rcStrict == VINF_SUCCESS)
1342 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1343 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1344# endif
1345}
1346
1347#endif /* IEM_WITH_SETJMP */
1348
1349#ifndef IEM_WITH_SETJMP
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu16 Where to return the opcode word.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu16 = (int8_t)u8;
1364 return rcStrict;
1365}
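/* Example: an opcode byte of 0xFE (-2) is returned as 0xFFFE by the function above;
 * the U32 and U64 variants below yield 0xFFFFFFFE and 0xFFFFFFFFFFFFFFFE
 * respectively, matching the (int8_t) sign extension. */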
1366
1367
1368/**
1369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1370 *
1371 * @returns Strict VBox status code.
1372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1373 * @param pu32 Where to return the opcode dword.
1374 */
1375VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1376{
1377 uint8_t u8;
1378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1379 if (rcStrict == VINF_SUCCESS)
1380 *pu32 = (int8_t)u8;
1381 return rcStrict;
1382}
1383
1384
1385/**
1386 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1387 *
1388 * @returns Strict VBox status code.
1389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1390 * @param pu64 Where to return the opcode qword.
1391 */
1392VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1393{
1394 uint8_t u8;
1395 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1396 if (rcStrict == VINF_SUCCESS)
1397 *pu64 = (int8_t)u8;
1398 return rcStrict;
1399}
1400
1401#endif /* !IEM_WITH_SETJMP */
1402
1403
1404#ifndef IEM_WITH_SETJMP
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 * @param pu16 Where to return the opcode word.
1412 */
1413VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1414{
1415 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1416 if (rcStrict == VINF_SUCCESS)
1417 {
1418 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1419# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1420 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1421# else
1422 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1423# endif
1424 pVCpu->iem.s.offOpcode = offOpcode + 2;
1425 }
1426 else
1427 *pu16 = 0;
1428 return rcStrict;
1429}
1430
1431#else /* IEM_WITH_SETJMP */
1432
1433/**
1434 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1435 *
1436 * @returns The opcode word.
1437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1438 */
1439uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1440{
1441# ifdef IEM_WITH_CODE_TLB
1442 uint16_t u16;
1443 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1444 return u16;
1445# else
1446 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1447 if (rcStrict == VINF_SUCCESS)
1448 {
1449 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1450 pVCpu->iem.s.offOpcode += 2;
1451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1452 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1453# else
1454 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1455# endif
1456 }
1457 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1458# endif
1459}
1460
1461#endif /* IEM_WITH_SETJMP */
1462
1463#ifndef IEM_WITH_SETJMP
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu32 Where to return the opcode double word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu32 = 0;
1483 return rcStrict;
1484}
1485
1486
1487/**
1488 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1489 *
1490 * @returns Strict VBox status code.
1491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1492 * @param pu64 Where to return the opcode quad word.
1493 */
1494VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1495{
1496 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1497 if (rcStrict == VINF_SUCCESS)
1498 {
1499 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1500 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1501 pVCpu->iem.s.offOpcode = offOpcode + 2;
1502 }
1503 else
1504 *pu64 = 0;
1505 return rcStrict;
1506}
1507
1508#endif /* !IEM_WITH_SETJMP */
1509
1510#ifndef IEM_WITH_SETJMP
1511
1512/**
1513 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1514 *
1515 * @returns Strict VBox status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pu32 Where to return the opcode dword.
1518 */
1519VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1520{
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1526 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1527# else
1528 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1529 pVCpu->iem.s.abOpcode[offOpcode + 1],
1530 pVCpu->iem.s.abOpcode[offOpcode + 2],
1531 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1532# endif
1533 pVCpu->iem.s.offOpcode = offOpcode + 4;
1534 }
1535 else
1536 *pu32 = 0;
1537 return rcStrict;
1538}
1539
1540#else /* IEM_WITH_SETJMP */
1541
1542/**
1543 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1544 *
1545 * @returns The opcode dword.
1546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1547 */
1548uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1549{
1550# ifdef IEM_WITH_CODE_TLB
1551 uint32_t u32;
1552 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1553 return u32;
1554# else
1555 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1556 if (rcStrict == VINF_SUCCESS)
1557 {
1558 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1559 pVCpu->iem.s.offOpcode = offOpcode + 4;
1560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1561 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1562# else
1563 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1564 pVCpu->iem.s.abOpcode[offOpcode + 1],
1565 pVCpu->iem.s.abOpcode[offOpcode + 2],
1566 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1567# endif
1568 }
1569 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1570# endif
1571}
1572
1573#endif /* IEM_WITH_SETJMP */
1574
1575#ifndef IEM_WITH_SETJMP
1576
1577/**
1578 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1582 * @param pu64 Where to return the opcode dword.
1583 */
1584VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1585{
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1591 pVCpu->iem.s.abOpcode[offOpcode + 1],
1592 pVCpu->iem.s.abOpcode[offOpcode + 2],
1593 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1594 pVCpu->iem.s.offOpcode = offOpcode + 4;
1595 }
1596 else
1597 *pu64 = 0;
1598 return rcStrict;
1599}
1600
1601
1602/**
1603 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1607 * @param pu64 Where to return the opcode qword.
1608 */
1609VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1610{
1611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1612 if (rcStrict == VINF_SUCCESS)
1613 {
1614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1615 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1616 pVCpu->iem.s.abOpcode[offOpcode + 1],
1617 pVCpu->iem.s.abOpcode[offOpcode + 2],
1618 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1619 pVCpu->iem.s.offOpcode = offOpcode + 4;
1620 }
1621 else
1622 *pu64 = 0;
1623 return rcStrict;
1624}
1625
1626#endif /* !IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param pu64 Where to return the opcode qword.
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1638{
1639 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1640 if (rcStrict == VINF_SUCCESS)
1641 {
1642 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1644 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1645# else
1646 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1647 pVCpu->iem.s.abOpcode[offOpcode + 1],
1648 pVCpu->iem.s.abOpcode[offOpcode + 2],
1649 pVCpu->iem.s.abOpcode[offOpcode + 3],
1650 pVCpu->iem.s.abOpcode[offOpcode + 4],
1651 pVCpu->iem.s.abOpcode[offOpcode + 5],
1652 pVCpu->iem.s.abOpcode[offOpcode + 6],
1653 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1654# endif
1655 pVCpu->iem.s.offOpcode = offOpcode + 8;
1656 }
1657 else
1658 *pu64 = 0;
1659 return rcStrict;
1660}
1661
1662#else /* IEM_WITH_SETJMP */
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1666 *
1667 * @returns The opcode qword.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 */
1670uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1671{
1672# ifdef IEM_WITH_CODE_TLB
1673 uint64_t u64;
1674 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1675 return u64;
1676# else
1677 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1678 if (rcStrict == VINF_SUCCESS)
1679 {
1680 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1681 pVCpu->iem.s.offOpcode = offOpcode + 8;
1682# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1683 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1684# else
1685 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1686 pVCpu->iem.s.abOpcode[offOpcode + 1],
1687 pVCpu->iem.s.abOpcode[offOpcode + 2],
1688 pVCpu->iem.s.abOpcode[offOpcode + 3],
1689 pVCpu->iem.s.abOpcode[offOpcode + 4],
1690 pVCpu->iem.s.abOpcode[offOpcode + 5],
1691 pVCpu->iem.s.abOpcode[offOpcode + 6],
1692 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1693# endif
1694 }
1695 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1696# endif
1697}
1698
1699#endif /* IEM_WITH_SETJMP */
1700
1701
1702
1703/** @name Misc Worker Functions.
1704 * @{
1705 */
1706
1707/**
1708 * Gets the exception class for the specified exception vector.
1709 *
1710 * @returns The class of the specified exception.
1711 * @param uVector The exception vector.
1712 */
1713static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1714{
1715 Assert(uVector <= X86_XCPT_LAST);
1716 switch (uVector)
1717 {
1718 case X86_XCPT_DE:
1719 case X86_XCPT_TS:
1720 case X86_XCPT_NP:
1721 case X86_XCPT_SS:
1722 case X86_XCPT_GP:
1723 case X86_XCPT_SX: /* AMD only */
1724 return IEMXCPTCLASS_CONTRIBUTORY;
1725
1726 case X86_XCPT_PF:
1727 case X86_XCPT_VE: /* Intel only */
1728 return IEMXCPTCLASS_PAGE_FAULT;
1729
1730 case X86_XCPT_DF:
1731 return IEMXCPTCLASS_DOUBLE_FAULT;
1732 }
1733 return IEMXCPTCLASS_BENIGN;
1734}
1735
1736
1737/**
1738 * Evaluates how to handle an exception caused during delivery of another event
1739 * (exception / interrupt).
1740 *
1741 * @returns How to handle the recursive exception.
1742 * @param pVCpu The cross context virtual CPU structure of the
1743 * calling thread.
1744 * @param fPrevFlags The flags of the previous event.
1745 * @param uPrevVector The vector of the previous event.
1746 * @param fCurFlags The flags of the current exception.
1747 * @param uCurVector The vector of the current exception.
1748 * @param pfXcptRaiseInfo Where to store additional information about the
1749 * exception condition. Optional.
1750 */
1751VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1752 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1753{
1754 /*
1755     * Only CPU exceptions can be raised while delivering other events; software interrupt
1756 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1757 */
1758 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1759 Assert(pVCpu); RT_NOREF(pVCpu);
1760 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1761
1762 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1763 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1764 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1765 {
1766 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1767 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1768 {
1769 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1770 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1771 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1772 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1773 {
1774 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1775 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1776 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1777 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1778 uCurVector, pVCpu->cpum.GstCtx.cr2));
1779 }
1780 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1781 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1782 {
1783 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1784 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1785 }
1786 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1787 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1788 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1789 {
1790 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1791 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1792 }
1793 }
1794 else
1795 {
1796 if (uPrevVector == X86_XCPT_NMI)
1797 {
1798 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1799 if (uCurVector == X86_XCPT_PF)
1800 {
1801 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1802 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1803 }
1804 }
1805 else if ( uPrevVector == X86_XCPT_AC
1806 && uCurVector == X86_XCPT_AC)
1807 {
1808 enmRaise = IEMXCPTRAISE_CPU_HANG;
1809 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1810 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1811 }
1812 }
1813 }
1814 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1815 {
1816 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1817 if (uCurVector == X86_XCPT_PF)
1818 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1819 }
1820 else
1821 {
1822 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1823 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1824 }
1825
1826 if (pfXcptRaiseInfo)
1827 *pfXcptRaiseInfo = fRaiseInfo;
1828 return enmRaise;
1829}
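
/*
 * A worked example of the classification above, with vectors picked purely for
 * illustration: a #GP (contributory) raised while delivering a #NP (contributory)
 * escalates to a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT && fRaiseInfo == IEMXCPTRAISEINFO_NONE);
 */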
1830
1831
1832/**
1833 * Enters the CPU shutdown state initiated by a triple fault or other
1834 * unrecoverable conditions.
1835 *
1836 * @returns Strict VBox status code.
1837 * @param pVCpu The cross context virtual CPU structure of the
1838 * calling thread.
1839 */
1840static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1841{
1842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1843 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1844
1845 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1846 {
1847 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1848 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1849 }
1850
1851 RT_NOREF(pVCpu);
1852 return VINF_EM_TRIPLE_FAULT;
1853}
1854
1855
1856/**
1857 * Validates a new SS segment.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVCpu The cross context virtual CPU structure of the
1861 * calling thread.
1862 * @param NewSS The new SS selector.
1863 * @param uCpl The CPL to load the stack for.
1864 * @param pDesc Where to return the descriptor.
1865 */
1866static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1867{
1868 /* Null selectors are not allowed (we're not called for dispatching
1869 interrupts with SS=0 in long mode). */
1870 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1871 {
1872 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1873 return iemRaiseTaskSwitchFault0(pVCpu);
1874 }
1875
1876 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1877 if ((NewSS & X86_SEL_RPL) != uCpl)
1878 {
1879 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1880 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1881 }
1882
1883 /*
1884 * Read the descriptor.
1885 */
1886 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1887 if (rcStrict != VINF_SUCCESS)
1888 return rcStrict;
1889
1890 /*
1891 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1892 */
1893 if (!pDesc->Legacy.Gen.u1DescType)
1894 {
1895 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1896 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1897 }
1898
1899 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1900 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1901 {
1902 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1903 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1904 }
1905 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1906 {
1907 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1908 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1909 }
1910
1911 /* Is it there? */
1912 /** @todo testcase: Is this checked before the canonical / limit check below? */
1913 if (!pDesc->Legacy.Gen.u1Present)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1916 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1917 }
1918
1919 return VINF_SUCCESS;
1920}
1921
1922/** @} */
1923
1924
1925/** @name Raising Exceptions.
1926 *
1927 * @{
1928 */
1929
1930
1931/**
1932 * Loads the specified stack far pointer from the TSS.
1933 *
1934 * @returns VBox strict status code.
1935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1936 * @param uCpl The CPL to load the stack for.
1937 * @param pSelSS Where to return the new stack segment.
1938 * @param puEsp Where to return the new stack pointer.
1939 */
1940static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1941{
1942 VBOXSTRICTRC rcStrict;
1943 Assert(uCpl < 4);
1944
1945 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1946 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1947 {
1948 /*
1949 * 16-bit TSS (X86TSS16).
1950 */
1951 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1952 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1953 {
1954 uint32_t off = uCpl * 4 + 2;
1955 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1956 {
1957 /** @todo check actual access pattern here. */
1958 uint32_t u32Tmp = 0; /* gcc maybe... */
1959 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1960 if (rcStrict == VINF_SUCCESS)
1961 {
1962 *puEsp = RT_LOWORD(u32Tmp);
1963 *pSelSS = RT_HIWORD(u32Tmp);
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 else
1968 {
1969 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1970 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1971 }
1972 break;
1973 }
1974
1975 /*
1976 * 32-bit TSS (X86TSS32).
1977 */
1978 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1979 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1980 {
1981 uint32_t off = uCpl * 8 + 4;
1982 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1983 {
1984/** @todo check actual access pattern here. */
1985 uint64_t u64Tmp;
1986 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1987 if (rcStrict == VINF_SUCCESS)
1988 {
1989 *puEsp = u64Tmp & UINT32_MAX;
1990 *pSelSS = (RTSEL)(u64Tmp >> 32);
1991 return VINF_SUCCESS;
1992 }
1993 }
1994 else
1995 {
1996                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1997 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1998 }
1999 break;
2000 }
2001
2002 default:
2003 AssertFailed();
2004 rcStrict = VERR_IEM_IPE_4;
2005 break;
2006 }
2007
2008 *puEsp = 0; /* make gcc happy */
2009 *pSelSS = 0; /* make gcc happy */
2010 return rcStrict;
2011}
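
/*
 * The offset arithmetic above in concrete numbers (X86TSS16 / X86TSS32 layouts per
 * the Intel SDM), using CPL 1 purely for illustration:
 *
 *      uint32_t const off16 = 1 * 4 + 2;   // 0x06: sp1 (ss1 follows at 0x08); 4 bytes per ring starting at sp0 = 0x02.
 *      uint32_t const off32 = 1 * 8 + 4;   // 0x0c: esp1 (ss1 follows at 0x10); 8 bytes per ring starting at esp0 = 0x04.
 *
 * This is why the 16-bit path reads a single 32-bit value (SP in the low word, SS in
 * the high word) and the 32-bit path reads a single 64-bit value (ESP in the low
 * dword, SS in the high dword).
 */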
2012
2013
2014/**
2015 * Loads the specified stack pointer from the 64-bit TSS.
2016 *
2017 * @returns VBox strict status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param uCpl The CPL to load the stack for.
2020 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2021 * @param puRsp Where to return the new stack pointer.
2022 */
2023static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2024{
2025 Assert(uCpl < 4);
2026 Assert(uIst < 8);
2027 *puRsp = 0; /* make gcc happy */
2028
2029 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2030 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2031
2032 uint32_t off;
2033 if (uIst)
2034 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2035 else
2036 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2037 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2038 {
2039 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2040 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2041 }
2042
2043 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2044}
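
/*
 * Likewise for the 64-bit TSS above (X86TSS64: rsp0 at 0x04, ist1 at 0x24), a quick
 * sanity sketch with uCpl=2 and uIst=3 picked for illustration:
 *
 *      uint32_t const offRsp2 = 2 * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);       // 0x14
 *      uint32_t const offIst3 = (3 - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1); // 0x34
 */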
2045
2046
2047/**
2048 * Adjust the CPU state according to the exception being raised.
2049 *
2050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param u8Vector The exception that has been raised.
2052 */
2053DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2054{
2055 switch (u8Vector)
2056 {
2057 case X86_XCPT_DB:
2058 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2059 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2060 break;
2061 /** @todo Read the AMD and Intel exception reference... */
2062 }
2063}
2064
2065
2066/**
2067 * Implements exceptions and interrupts for real mode.
2068 *
2069 * @returns VBox strict status code.
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param cbInstr The number of bytes to offset rIP by in the return
2072 * address.
2073 * @param u8Vector The interrupt / exception vector number.
2074 * @param fFlags The flags.
2075 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2076 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2077 */
2078static VBOXSTRICTRC
2079iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2080 uint8_t cbInstr,
2081 uint8_t u8Vector,
2082 uint32_t fFlags,
2083 uint16_t uErr,
2084 uint64_t uCr2) RT_NOEXCEPT
2085{
2086 NOREF(uErr); NOREF(uCr2);
2087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2088
2089 /*
2090 * Read the IDT entry.
2091 */
2092 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2093 {
2094 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2096 }
2097 RTFAR16 Idte;
2098 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2099 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2100 {
2101 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2102 return rcStrict;
2103 }
2104
2105 /*
2106 * Push the stack frame.
2107 */
2108 uint16_t *pu16Frame;
2109 uint64_t uNewRsp;
2110 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2111 if (rcStrict != VINF_SUCCESS)
2112 return rcStrict;
2113
2114 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2115#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2116 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2117 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2118 fEfl |= UINT16_C(0xf000);
2119#endif
2120 pu16Frame[2] = (uint16_t)fEfl;
2121 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2122 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2123 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2124 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2125 return rcStrict;
2126
2127 /*
2128 * Load the vector address into cs:ip and make exception specific state
2129 * adjustments.
2130 */
2131 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2132 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2133 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2134 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2135 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2136 pVCpu->cpum.GstCtx.rip = Idte.off;
2137 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2138 IEMMISC_SET_EFL(pVCpu, fEfl);
2139
2140 /** @todo do we actually do this in real mode? */
2141 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2142 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2143
2144    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2145 so best leave them alone in case we're in a weird kind of real mode... */
2146
2147 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2148}
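
/*
 * For reference, the real-mode dispatch above boils down to an IVT lookup and a
 * three word stack frame.  A sketch for INT 10h, with the vector picked purely for
 * illustration:
 *
 *      uint8_t const  u8Vector = 0x10;
 *      uint32_t const offIdte  = UINT32_C(4) * u8Vector;   // 0x40 into the IVT: word 0 = new IP, word 1 = new CS.
 *      // The frame pushed above, from high to low address: FLAGS, CS, return IP
 *      // (pu16Frame[2], pu16Frame[1] and pu16Frame[0] respectively).
 */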
2149
2150
2151/**
2152 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2153 *
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pSReg Pointer to the segment register.
2156 */
2157DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2158{
2159 pSReg->Sel = 0;
2160 pSReg->ValidSel = 0;
2161 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2162 {
2163        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2164 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2165 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2166 }
2167 else
2168 {
2169 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2170 /** @todo check this on AMD-V */
2171 pSReg->u64Base = 0;
2172 pSReg->u32Limit = 0;
2173 }
2174}
2175
2176
2177/**
2178 * Loads a segment selector during a task switch in V8086 mode.
2179 *
2180 * @param pSReg Pointer to the segment register.
2181 * @param uSel The selector value to load.
2182 */
2183DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2184{
2185 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2186 pSReg->Sel = uSel;
2187 pSReg->ValidSel = uSel;
2188 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2189 pSReg->u64Base = uSel << 4;
2190 pSReg->u32Limit = 0xffff;
2191 pSReg->Attr.u = 0xf3;
2192}
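
/*
 * A worked example of the V8086 selector load above, with an illustrative value:
 *
 *      uint16_t const uSel  = 0x1234;
 *      uint64_t const uBase = (uint64_t)uSel << 4;   // 0x12340
 *      // limit = 0xffff, attributes = 0xf3 (present, DPL=3, accessed read/write data).
 */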
2193
2194
2195/**
2196 * Loads a segment selector during a task switch in protected mode.
2197 *
2198 * In this task switch scenario, we would throw \#TS exceptions rather than
2199 * \#GPs.
2200 *
2201 * @returns VBox strict status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pSReg Pointer to the segment register.
2204 * @param uSel The new selector value.
2205 *
2206 * @remarks This does _not_ handle CS or SS.
2207 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2208 */
2209static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2210{
2211 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2212
2213 /* Null data selector. */
2214 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2215 {
2216 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2218 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2219 return VINF_SUCCESS;
2220 }
2221
2222 /* Fetch the descriptor. */
2223 IEMSELDESC Desc;
2224 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2225 if (rcStrict != VINF_SUCCESS)
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2228 VBOXSTRICTRC_VAL(rcStrict)));
2229 return rcStrict;
2230 }
2231
2232 /* Must be a data segment or readable code segment. */
2233 if ( !Desc.Legacy.Gen.u1DescType
2234 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2237 Desc.Legacy.Gen.u4Type));
2238 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2239 }
2240
2241 /* Check privileges for data segments and non-conforming code segments. */
2242 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2243 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2244 {
2245 /* The RPL and the new CPL must be less than or equal to the DPL. */
2246 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2247 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2248 {
2249 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2250 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2251 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2252 }
2253 }
2254
2255 /* Is it there? */
2256 if (!Desc.Legacy.Gen.u1Present)
2257 {
2258 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2259 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* The base and limit. */
2263 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2264 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2265
2266 /*
2267 * Ok, everything checked out fine. Now set the accessed bit before
2268 * committing the result into the registers.
2269 */
2270 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2271 {
2272 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2273 if (rcStrict != VINF_SUCCESS)
2274 return rcStrict;
2275 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2276 }
2277
2278 /* Commit */
2279 pSReg->Sel = uSel;
2280 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2281 pSReg->u32Limit = cbLimit;
2282 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2283 pSReg->ValidSel = uSel;
2284 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2285 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2286 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2287
2288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2289 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2290 return VINF_SUCCESS;
2291}
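
/*
 * The privilege rule above in concrete numbers (data or non-conforming code segment,
 * values for illustration only): with DPL=2, a selector with RPL=3 or a CPL of 3
 * fails the check and raises #TS, while RPL <= 2 and CPL <= 2 pass:
 *
 *      bool const fFault = (3 > 2) || (3 > 2);   // RPL > DPL || CPL > DPL -> #TS
 */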
2292
2293
2294/**
2295 * Performs a task switch.
2296 *
2297 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2298 * caller is responsible for performing the necessary checks (like DPL, TSS
2299 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2300 * reference for JMP, CALL, IRET.
2301 *
2302 * If the task switch is due to a software interrupt or hardware exception,
2303 * the caller is responsible for validating the TSS selector and descriptor. See
2304 * Intel Instruction reference for INT n.
2305 *
2306 * @returns VBox strict status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param enmTaskSwitch The cause of the task switch.
2309 * @param uNextEip The EIP effective after the task switch.
2310 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2311 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2312 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2313 * @param SelTSS The TSS selector of the new task.
2314 * @param pNewDescTSS Pointer to the new TSS descriptor.
2315 */
2316VBOXSTRICTRC
2317iemTaskSwitch(PVMCPUCC pVCpu,
2318 IEMTASKSWITCH enmTaskSwitch,
2319 uint32_t uNextEip,
2320 uint32_t fFlags,
2321 uint16_t uErr,
2322 uint64_t uCr2,
2323 RTSEL SelTSS,
2324 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2325{
2326 Assert(!IEM_IS_REAL_MODE(pVCpu));
2327 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2328 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2329
2330 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2331 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2332 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2333 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2334 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2335
2336 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2337 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2338
2339 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2340 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2341
2342 /* Update CR2 in case it's a page-fault. */
2343 /** @todo This should probably be done much earlier in IEM/PGM. See
2344 * @bugref{5653#c49}. */
2345 if (fFlags & IEM_XCPT_FLAGS_CR2)
2346 pVCpu->cpum.GstCtx.cr2 = uCr2;
2347
2348 /*
2349 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2350 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2351 */
2352 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2353 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2354 if (uNewTSSLimit < uNewTSSLimitMin)
2355 {
2356 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2357 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2359 }
2360
2361 /*
2362     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2363 * The new TSS must have been read and validated (DPL, limits etc.) before a
2364 * task-switch VM-exit commences.
2365 *
2366 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2367 */
2368 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2369 {
2370 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2371 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2372 }
2373
2374 /*
2375 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2376 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2377 */
2378 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2379 {
2380 uint32_t const uExitInfo1 = SelTSS;
2381 uint32_t uExitInfo2 = uErr;
2382 switch (enmTaskSwitch)
2383 {
2384 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2385 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2386 default: break;
2387 }
2388 if (fFlags & IEM_XCPT_FLAGS_ERR)
2389 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2390 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2391 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2392
2393 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2394 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2395 RT_NOREF2(uExitInfo1, uExitInfo2);
2396 }
2397
2398 /*
2399     * Check the current TSS limit. The last write to the current TSS during the
2400     * task switch is 2 bytes at offset 0x5C (32-bit) or 1 byte at offset 0x28 (16-bit).
2401 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2402 *
2403     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2404 * end up with smaller than "legal" TSS limits.
2405 */
2406 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2407 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2408 if (uCurTSSLimit < uCurTSSLimitMin)
2409 {
2410 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2411 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2412 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2413 }
2414
2415 /*
2416 * Verify that the new TSS can be accessed and map it. Map only the required contents
2417 * and not the entire TSS.
2418 */
2419 void *pvNewTSS;
2420 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2421 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2422 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2423 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2424 * not perform correct translation if this happens. See Intel spec. 7.2.1
2425 * "Task-State Segment". */
2426 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2430 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2431 return rcStrict;
2432 }
2433
2434 /*
2435 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2436 */
2437 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2438 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2439 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 PX86DESC pDescCurTSS;
2442 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2443 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2444 if (rcStrict != VINF_SUCCESS)
2445 {
2446 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2447 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2448 return rcStrict;
2449 }
2450
2451 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2452 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2453 if (rcStrict != VINF_SUCCESS)
2454 {
2455 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2456 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2457 return rcStrict;
2458 }
2459
2460 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2461 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2462 {
2463 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2464 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2465 fEFlags &= ~X86_EFL_NT;
2466 }
2467 }
2468
2469 /*
2470 * Save the CPU state into the current TSS.
2471 */
2472 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2473 if (GCPtrNewTSS == GCPtrCurTSS)
2474 {
2475 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2476 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2477 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2478 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2479 pVCpu->cpum.GstCtx.ldtr.Sel));
2480 }
2481 if (fIsNewTSS386)
2482 {
2483 /*
2484 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2485 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2486 */
2487 void *pvCurTSS32;
2488 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2489 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2490 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2491 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2492 if (rcStrict != VINF_SUCCESS)
2493 {
2494 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2495 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2496 return rcStrict;
2497 }
2498
2499        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2500 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2501 pCurTSS32->eip = uNextEip;
2502 pCurTSS32->eflags = fEFlags;
2503 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2504 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2505 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2506 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2507 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2508 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2509 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2510 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2511 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2512 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2513 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2514 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2515 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2516 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2517
2518 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2522 VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525 }
2526 else
2527 {
2528 /*
2529 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2530 */
2531 void *pvCurTSS16;
2532 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2533 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2534 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2535 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2536 if (rcStrict != VINF_SUCCESS)
2537 {
2538 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2539 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2540 return rcStrict;
2541 }
2542
2543        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2544 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2545 pCurTSS16->ip = uNextEip;
2546 pCurTSS16->flags = (uint16_t)fEFlags;
2547 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2548 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2549 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2550 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2551 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2552 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2553 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2554 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2555 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2556 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2557 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2558 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2559
2560 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2564 VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567 }
2568
2569 /*
2570 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2571 */
2572 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2573 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2574 {
2575        /* Whether it's a 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2576 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2577 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2578 }
2579
2580 /*
2581 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2582     * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
2583 */
2584 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2585 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2586 bool fNewDebugTrap;
2587 if (fIsNewTSS386)
2588 {
2589 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2590 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2591 uNewEip = pNewTSS32->eip;
2592 uNewEflags = pNewTSS32->eflags;
2593 uNewEax = pNewTSS32->eax;
2594 uNewEcx = pNewTSS32->ecx;
2595 uNewEdx = pNewTSS32->edx;
2596 uNewEbx = pNewTSS32->ebx;
2597 uNewEsp = pNewTSS32->esp;
2598 uNewEbp = pNewTSS32->ebp;
2599 uNewEsi = pNewTSS32->esi;
2600 uNewEdi = pNewTSS32->edi;
2601 uNewES = pNewTSS32->es;
2602 uNewCS = pNewTSS32->cs;
2603 uNewSS = pNewTSS32->ss;
2604 uNewDS = pNewTSS32->ds;
2605 uNewFS = pNewTSS32->fs;
2606 uNewGS = pNewTSS32->gs;
2607 uNewLdt = pNewTSS32->selLdt;
2608 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2609 }
2610 else
2611 {
2612 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2613 uNewCr3 = 0;
2614 uNewEip = pNewTSS16->ip;
2615 uNewEflags = pNewTSS16->flags;
2616 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2617 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2618 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2619 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2620 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2621 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2622 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2623 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2624 uNewES = pNewTSS16->es;
2625 uNewCS = pNewTSS16->cs;
2626 uNewSS = pNewTSS16->ss;
2627 uNewDS = pNewTSS16->ds;
2628 uNewFS = 0;
2629 uNewGS = 0;
2630 uNewLdt = pNewTSS16->selLdt;
2631 fNewDebugTrap = false;
2632 }
2633
2634 if (GCPtrNewTSS == GCPtrCurTSS)
2635 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2636 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2637
2638 /*
2639 * We're done accessing the new TSS.
2640 */
2641 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2645 return rcStrict;
2646 }
2647
2648 /*
2649 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2650 */
2651 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2652 {
2653 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2654 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2655 if (rcStrict != VINF_SUCCESS)
2656 {
2657 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2658 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2659 return rcStrict;
2660 }
2661
2662 /* Check that the descriptor indicates the new TSS is available (not busy). */
2663 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2664 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2665 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2666
2667 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2668 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2669 if (rcStrict != VINF_SUCCESS)
2670 {
2671 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2672 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2673 return rcStrict;
2674 }
2675 }
2676
2677 /*
2678     * From this point on, we're technically in the new task. Exceptions raised from here on
2679     * are deferred until the task switch completes, and delivered before any instruction executes in the new task.
2680 */
2681 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2682 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2683 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2684 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2685 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2686 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2687 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2688
2689 /* Set the busy bit in TR. */
2690 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2691
2692 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2693 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2694 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2695 {
2696 uNewEflags |= X86_EFL_NT;
2697 }
2698
2699 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2700 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2701 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2702
2703 pVCpu->cpum.GstCtx.eip = uNewEip;
2704 pVCpu->cpum.GstCtx.eax = uNewEax;
2705 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2706 pVCpu->cpum.GstCtx.edx = uNewEdx;
2707 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2708 pVCpu->cpum.GstCtx.esp = uNewEsp;
2709 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2710 pVCpu->cpum.GstCtx.esi = uNewEsi;
2711 pVCpu->cpum.GstCtx.edi = uNewEdi;
2712
2713 uNewEflags &= X86_EFL_LIVE_MASK;
2714 uNewEflags |= X86_EFL_RA1_MASK;
2715 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2716
2717 /*
2718 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2719 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2720 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2721 */
2722 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2723 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2724
2725 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2726 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2727
2728 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2729 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2730
2731 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2732 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2733
2734 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2735 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2736
2737 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2738 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2739 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2740
2741 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2742 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2743 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2744 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2745
2746 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2747 {
2748 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2749 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2750 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2751 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2752 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2753 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2754 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2755 }
2756
2757 /*
2758 * Switch CR3 for the new task.
2759 */
2760 if ( fIsNewTSS386
2761 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2762 {
2763 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2764 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2765 AssertRCSuccessReturn(rc, rc);
2766
2767 /* Inform PGM. */
2768 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2769 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2770 AssertRCReturn(rc, rc);
2771 /* ignore informational status codes */
2772
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2774 }
2775
2776 /*
2777 * Switch LDTR for the new task.
2778 */
2779 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2780 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2781 else
2782 {
2783 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2784
2785 IEMSELDESC DescNewLdt;
2786 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2787 if (rcStrict != VINF_SUCCESS)
2788 {
2789 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2790 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2791 return rcStrict;
2792 }
2793 if ( !DescNewLdt.Legacy.Gen.u1Present
2794 || DescNewLdt.Legacy.Gen.u1DescType
2795 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2796 {
2797 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2798 uNewLdt, DescNewLdt.Legacy.u));
2799 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2800 }
2801
2802 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2803 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2804 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2805 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2806 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2807 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2808 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2810 }
2811
2812 IEMSELDESC DescSS;
2813 if (IEM_IS_V86_MODE(pVCpu))
2814 {
2815 IEM_SET_CPL(pVCpu, 3);
2816 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2817 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2818 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2819 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2820 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2821 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2822
2823 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2824 DescSS.Legacy.u = 0;
2825 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2826 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2827 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2828 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2829 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2830 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2831 DescSS.Legacy.Gen.u2Dpl = 3;
2832 }
2833 else
2834 {
2835 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2836
2837 /*
2838 * Load the stack segment for the new task.
2839 */
2840 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2841 {
2842 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2843 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2844 }
2845
2846 /* Fetch the descriptor. */
2847 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2848 if (rcStrict != VINF_SUCCESS)
2849 {
2850 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2851 VBOXSTRICTRC_VAL(rcStrict)));
2852 return rcStrict;
2853 }
2854
2855 /* SS must be a data segment and writable. */
2856 if ( !DescSS.Legacy.Gen.u1DescType
2857 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2858 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2859 {
2860 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2861 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2862 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2863 }
2864
2865 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2866 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2867 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2868 {
2869 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2870 uNewCpl));
2871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2872 }
2873
2874 /* Is it there? */
2875 if (!DescSS.Legacy.Gen.u1Present)
2876 {
2877 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2878 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2879 }
2880
2881 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2882 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2883
2884 /* Set the accessed bit before committing the result into SS. */
2885 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2886 {
2887 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2888 if (rcStrict != VINF_SUCCESS)
2889 return rcStrict;
2890 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2891 }
2892
2893 /* Commit SS. */
2894 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2895 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2896 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2897 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2898 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2899 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2900 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2901
2902 /* CPL has changed, update IEM before loading rest of segments. */
2903 IEM_SET_CPL(pVCpu, uNewCpl);
2904
2905 /*
2906 * Load the data segments for the new task.
2907 */
2908 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2909 if (rcStrict != VINF_SUCCESS)
2910 return rcStrict;
2911 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2915 if (rcStrict != VINF_SUCCESS)
2916 return rcStrict;
2917 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2918 if (rcStrict != VINF_SUCCESS)
2919 return rcStrict;
2920
2921 /*
2922 * Load the code segment for the new task.
2923 */
2924 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2925 {
2926 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2927 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2928 }
2929
2930 /* Fetch the descriptor. */
2931 IEMSELDESC DescCS;
2932 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2933 if (rcStrict != VINF_SUCCESS)
2934 {
2935 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2936 return rcStrict;
2937 }
2938
2939 /* CS must be a code segment. */
2940 if ( !DescCS.Legacy.Gen.u1DescType
2941 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2942 {
2943 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2944 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2945 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2946 }
2947
2948 /* For conforming CS, DPL must be less than or equal to the RPL. */
2949 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2950 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2951 {
2952            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2953 DescCS.Legacy.Gen.u2Dpl));
2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2955 }
2956
2957 /* For non-conforming CS, DPL must match RPL. */
2958 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2959 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2960 {
2961            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2962 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* Is it there? */
2967 if (!DescCS.Legacy.Gen.u1Present)
2968 {
2969 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2971 }
2972
2973 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2974 u64Base = X86DESC_BASE(&DescCS.Legacy);
2975
2976 /* Set the accessed bit before committing the result into CS. */
2977 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2978 {
2979 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2980 if (rcStrict != VINF_SUCCESS)
2981 return rcStrict;
2982 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2983 }
2984
2985 /* Commit CS. */
2986 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2987 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2988 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2989 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2990 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2991 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2993 }
2994
2995 /* Make sure the CPU mode is correct. */
2996 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2997 if (fExecNew != pVCpu->iem.s.fExec)
2998 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2999 pVCpu->iem.s.fExec = fExecNew;
3000
3001 /** @todo Debug trap. */
3002 if (fIsNewTSS386 && fNewDebugTrap)
3003 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3004
3005 /*
3006 * Construct the error code masks based on what caused this task switch.
3007 * See Intel Instruction reference for INT.
3008 */
3009 uint16_t uExt;
3010 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3011 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3012 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3013 uExt = 1;
3014 else
3015 uExt = 0;
3016
3017 /*
3018 * Push any error code on to the new stack.
3019 */
3020 if (fFlags & IEM_XCPT_FLAGS_ERR)
3021 {
3022 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3023 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3024 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3025
3026 /* Check that there is sufficient space on the stack. */
3027 /** @todo Factor out segment limit checking for normal/expand down segments
3028 * into a separate function. */
3029 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3030 {
3031 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3032 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3033 {
3034 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3035 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3036 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3037 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3038 }
3039 }
3040 else
3041 {
3042 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3043 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3044 {
3045 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3046 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3047 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3048 }
3049 }
3050
3051
3052 if (fIsNewTSS386)
3053 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3054 else
3055 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3056 if (rcStrict != VINF_SUCCESS)
3057 {
3058 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3059 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3060 return rcStrict;
3061 }
3062 }
3063
3064 /* Check the new EIP against the new CS limit. */
3065 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3066 {
3067 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3068 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3069 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3070 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3071 }
3072
3073 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3074 pVCpu->cpum.GstCtx.ss.Sel));
3075 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3076}
3077
3078
3079/**
3080 * Implements exceptions and interrupts for protected mode.
3081 *
3082 * @returns VBox strict status code.
3083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3084 * @param cbInstr The number of bytes to offset rIP by in the return
3085 * address.
3086 * @param u8Vector The interrupt / exception vector number.
3087 * @param fFlags The flags.
3088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3090 */
3091static VBOXSTRICTRC
3092iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3093 uint8_t cbInstr,
3094 uint8_t u8Vector,
3095 uint32_t fFlags,
3096 uint16_t uErr,
3097 uint64_t uCr2) RT_NOEXCEPT
3098{
3099 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3100
3101 /*
3102 * Read the IDT entry.
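 * Each protected mode IDT entry is 8 bytes, so the gate for u8Vector lives at
 * IDTR.base + 8 * u8Vector; e.g. vector 0x0e (#PF) occupies bytes 0x70..0x77,
 * which is what the limit check below verifies is inside the IDT.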
3103 */
3104 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 X86DESC Idte;
3110 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3111 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3113 {
3114 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3115 return rcStrict;
3116 }
3117 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3118 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3119 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3120 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3121
3122 /*
3123 * Check the descriptor type, DPL and such.
3124 * ASSUMES this is done in the same order as described for call-gate calls.
3125 */
3126 if (Idte.Gate.u1DescType)
3127 {
3128 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131 bool fTaskGate = false;
3132 uint8_t f32BitGate = true;
3133 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3134 switch (Idte.Gate.u4Type)
3135 {
3136 case X86_SEL_TYPE_SYS_UNDEFINED:
3137 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3138 case X86_SEL_TYPE_SYS_LDT:
3139 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3140 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3141 case X86_SEL_TYPE_SYS_UNDEFINED2:
3142 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3143 case X86_SEL_TYPE_SYS_UNDEFINED3:
3144 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3145 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3146 case X86_SEL_TYPE_SYS_UNDEFINED4:
3147 {
3148 /** @todo check what actually happens when the type is wrong...
3149 * esp. call gates. */
3150 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3151 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3152 }
3153
3154 case X86_SEL_TYPE_SYS_286_INT_GATE:
3155 f32BitGate = false;
3156 RT_FALL_THRU();
3157 case X86_SEL_TYPE_SYS_386_INT_GATE:
3158 fEflToClear |= X86_EFL_IF;
3159 break;
3160
3161 case X86_SEL_TYPE_SYS_TASK_GATE:
3162 fTaskGate = true;
3163#ifndef IEM_IMPLEMENTS_TASKSWITCH
3164 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3165#endif
3166 break;
3167
3168 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3169 f32BitGate = false; RT_FALL_THRU();
3170 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3171 break;
3172
3173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3174 }
3175
3176 /* Check DPL against CPL if applicable. */
3177 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3178 {
3179 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3180 {
3181 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3182 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3183 }
3184 }
3185
3186 /* Is it there? */
3187 if (!Idte.Gate.u1Present)
3188 {
3189 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3190 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3191 }
3192
3193 /* Is it a task-gate? */
3194 if (fTaskGate)
3195 {
3196 /*
3197 * Construct the error code masks based on what caused this task switch.
3198 * See Intel Instruction reference for INT.
3199 */
3200 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3201 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3202 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3203 RTSEL SelTSS = Idte.Gate.u16Sel;
3204
3205 /*
3206 * Fetch the TSS descriptor in the GDT.
3207 */
3208 IEMSELDESC DescTSS;
3209 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3210 if (rcStrict != VINF_SUCCESS)
3211 {
3212 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3213 VBOXSTRICTRC_VAL(rcStrict)));
3214 return rcStrict;
3215 }
3216
3217 /* The TSS descriptor must be a system segment and be available (not busy). */
3218 if ( DescTSS.Legacy.Gen.u1DescType
3219 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3220 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3223 u8Vector, SelTSS, DescTSS.Legacy.au64));
3224 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3225 }
3226
3227 /* The TSS must be present. */
3228 if (!DescTSS.Legacy.Gen.u1Present)
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3231 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3232 }
3233
3234 /* Do the actual task switch. */
3235 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3236 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3237 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3238 }
3239
3240 /* A null CS is bad. */
3241 RTSEL NewCS = Idte.Gate.u16Sel;
3242 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3243 {
3244 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3245 return iemRaiseGeneralProtectionFault0(pVCpu);
3246 }
3247
3248 /* Fetch the descriptor for the new CS. */
3249 IEMSELDESC DescCS;
3250 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3251 if (rcStrict != VINF_SUCCESS)
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3254 return rcStrict;
3255 }
3256
3257 /* Must be a code segment. */
3258 if (!DescCS.Legacy.Gen.u1DescType)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3261 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3262 }
3263 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3264 {
3265 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3266 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3267 }
3268
3269 /* Don't allow lowering the privilege level. */
3270 /** @todo Does the lowering of privileges apply to software interrupts
3271 * only? This has bearings on the more-privileged or
3272 * same-privilege stack behavior further down. A testcase would
3273 * be nice. */
3274 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3275 {
3276 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3277 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3278 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3279 }
3280
3281 /* Make sure the selector is present. */
3282 if (!DescCS.Legacy.Gen.u1Present)
3283 {
3284 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3286 }
3287
3288#ifdef LOG_ENABLED
3289 /* If software interrupt, try decode it if logging is enabled and such. */
3290 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3291 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3292 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3293#endif
3294
3295 /* Check the new EIP against the new CS limit. */
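 /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words. */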
3296 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3297 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3298 ? Idte.Gate.u16OffsetLow
3299 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3300 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3301 if (uNewEip > cbLimitCS)
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3304 u8Vector, uNewEip, cbLimitCS, NewCS));
3305 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3306 }
3307 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3308
3309 /* Calc the flag image to push. */
3310 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3311 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3312 fEfl &= ~X86_EFL_RF;
3313 else
3314 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3315
3316 /* From V8086 mode only go to CPL 0. */
3317 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3318 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3319 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3320 {
3321 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3322 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3323 }
3324
3325 /*
3326 * If the privilege level changes, we need to get a new stack from the TSS.
3327 * This in turns means validating the new SS and ESP...
3328 */
3329 if (uNewCpl != IEM_GET_CPL(pVCpu))
3330 {
3331 RTSEL NewSS;
3332 uint32_t uNewEsp;
3333 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3334 if (rcStrict != VINF_SUCCESS)
3335 return rcStrict;
3336
3337 IEMSELDESC DescSS;
3338 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3339 if (rcStrict != VINF_SUCCESS)
3340 return rcStrict;
3341 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3342 if (!DescSS.Legacy.Gen.u1DefBig)
3343 {
3344 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3345 uNewEsp = (uint16_t)uNewEsp;
3346 }
3347
3348 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3349
3350 /* Check that there is sufficient space for the stack frame. */
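 /* The basic frame is IP, CS, FLAGS, SP and SS (5 words); an error code adds a
 6th word and a V8086 source adds ES, DS, FS and GS; the '<< f32BitGate' below
 doubles the size for 32-bit gates. */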
3351 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3352 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3353 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3354 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3355
3356 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3357 {
3358 if ( uNewEsp - 1 > cbLimitSS
3359 || uNewEsp < cbStackFrame)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3362 u8Vector, NewSS, uNewEsp, cbStackFrame));
3363 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3364 }
3365 }
3366 else
3367 {
3368 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3369 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3370 {
3371 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3372 u8Vector, NewSS, uNewEsp, cbStackFrame));
3373 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3374 }
3375 }
3376
3377 /*
3378 * Start making changes.
3379 */
3380
3381 /* Set the new CPL so that stack accesses use it. */
3382 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3383 IEM_SET_CPL(pVCpu, uNewCpl);
3384
3385 /* Create the stack frame. */
3386 RTPTRUNION uStackFrame;
3387 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3388 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3389 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3390 if (rcStrict != VINF_SUCCESS)
3391 return rcStrict;
3392 void * const pvStackFrame = uStackFrame.pv;
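 /* Frame layout (ring change): [uErr] (E)IP, CS, (E)FLAGS, (E)SP, SS, plus ES, DS, FS and GS when coming from V8086 mode. */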
3393 if (f32BitGate)
3394 {
3395 if (fFlags & IEM_XCPT_FLAGS_ERR)
3396 *uStackFrame.pu32++ = uErr;
3397 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3398 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3399 uStackFrame.pu32[2] = fEfl;
3400 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3401 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3402 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3403 if (fEfl & X86_EFL_VM)
3404 {
3405 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3406 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3407 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3408 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3409 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3410 }
3411 }
3412 else
3413 {
3414 if (fFlags & IEM_XCPT_FLAGS_ERR)
3415 *uStackFrame.pu16++ = uErr;
3416 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3417 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3418 uStackFrame.pu16[2] = fEfl;
3419 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3420 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3421 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3422 if (fEfl & X86_EFL_VM)
3423 {
3424 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3425 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3426 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3427 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3428 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3429 }
3430 }
3431 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3432 if (rcStrict != VINF_SUCCESS)
3433 return rcStrict;
3434
3435 /* Mark the selectors 'accessed' (hope this is the correct time). */
3436 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3437 * after pushing the stack frame? (Write protect the gdt + stack to
3438 * find out.) */
3439 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3440 {
3441 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3442 if (rcStrict != VINF_SUCCESS)
3443 return rcStrict;
3444 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3445 }
3446
3447 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3448 {
3449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3450 if (rcStrict != VINF_SUCCESS)
3451 return rcStrict;
3452 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3453 }
3454
3455 /*
3456 * Start committing the register changes (joins with the DPL=CPL branch).
3457 */
3458 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3459 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3460 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3461 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3462 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3463 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3464 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3465 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3466 * SP is loaded).
3467 * Need to check the other combinations too:
3468 * - 16-bit TSS, 32-bit handler
3469 * - 32-bit TSS, 16-bit handler */
3470 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3471 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3472 else
3473 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3474
3475 if (fEfl & X86_EFL_VM)
3476 {
3477 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3478 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3479 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3480 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3481 }
3482 }
3483 /*
3484 * Same privilege, no stack change and smaller stack frame.
3485 */
3486 else
3487 {
3488 uint64_t uNewRsp;
3489 RTPTRUNION uStackFrame;
3490 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3491 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3492 if (rcStrict != VINF_SUCCESS)
3493 return rcStrict;
3494 void * const pvStackFrame = uStackFrame.pv;
3495
3496 if (f32BitGate)
3497 {
3498 if (fFlags & IEM_XCPT_FLAGS_ERR)
3499 *uStackFrame.pu32++ = uErr;
3500 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3501 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3502 uStackFrame.pu32[2] = fEfl;
3503 }
3504 else
3505 {
3506 if (fFlags & IEM_XCPT_FLAGS_ERR)
3507 *uStackFrame.pu16++ = uErr;
3508 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3509 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3510 uStackFrame.pu16[2] = fEfl;
3511 }
3512 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3513 if (rcStrict != VINF_SUCCESS)
3514 return rcStrict;
3515
3516 /* Mark the CS selector as 'accessed'. */
3517 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3518 {
3519 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3520 if (rcStrict != VINF_SUCCESS)
3521 return rcStrict;
3522 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3523 }
3524
3525 /*
3526 * Start committing the register changes (joins with the other branch).
3527 */
3528 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3529 }
3530
3531 /* ... register committing continues. */
3532 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3533 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3534 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3535 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3536 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3537 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3538
3539 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3540 fEfl &= ~fEflToClear;
3541 IEMMISC_SET_EFL(pVCpu, fEfl);
3542
3543 if (fFlags & IEM_XCPT_FLAGS_CR2)
3544 pVCpu->cpum.GstCtx.cr2 = uCr2;
3545
3546 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3547 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3548
3549 /* Make sure the execution flags are correct. */
3550 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3551 if (fExecNew != pVCpu->iem.s.fExec)
3552 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3553 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3554 pVCpu->iem.s.fExec = fExecNew;
3555 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3556
3557 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3558}
3559
3560
3561/**
3562 * Implements exceptions and interrupts for long mode.
3563 *
3564 * @returns VBox strict status code.
3565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3566 * @param cbInstr The number of bytes to offset rIP by in the return
3567 * address.
3568 * @param u8Vector The interrupt / exception vector number.
3569 * @param fFlags The flags.
3570 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3571 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3572 */
3573static VBOXSTRICTRC
3574iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3575 uint8_t cbInstr,
3576 uint8_t u8Vector,
3577 uint32_t fFlags,
3578 uint16_t uErr,
3579 uint64_t uCr2) RT_NOEXCEPT
3580{
3581 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3582
3583 /*
3584 * Read the IDT entry.
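 * In long mode each IDT entry is 16 bytes (hence offIdt = vector * 16), so the
 * gate descriptor is fetched as two separate qwords below.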
3585 */
3586 uint16_t offIdt = (uint16_t)u8Vector << 4;
3587 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3588 {
3589 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3590 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3591 }
3592 X86DESC64 Idte;
3593#ifdef _MSC_VER /* Shut up silly compiler warning. */
3594 Idte.au64[0] = 0;
3595 Idte.au64[1] = 0;
3596#endif
3597 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3598 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3599 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3600 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3601 {
3602 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3603 return rcStrict;
3604 }
3605 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3606 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3607 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3608
3609 /*
3610 * Check the descriptor type, DPL and such.
3611 * ASSUMES this is done in the same order as described for call-gate calls.
3612 */
3613 if (Idte.Gate.u1DescType)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3619 switch (Idte.Gate.u4Type)
3620 {
3621 case AMD64_SEL_TYPE_SYS_INT_GATE:
3622 fEflToClear |= X86_EFL_IF;
3623 break;
3624 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3625 break;
3626
3627 default:
3628 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3629 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3630 }
3631
3632 /* Check DPL against CPL if applicable. */
3633 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3634 {
3635 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3636 {
3637 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3638 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3639 }
3640 }
3641
3642 /* Is it there? */
3643 if (!Idte.Gate.u1Present)
3644 {
3645 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3646 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3647 }
3648
3649 /* A null CS is bad. */
3650 RTSEL NewCS = Idte.Gate.u16Sel;
3651 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3652 {
3653 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3654 return iemRaiseGeneralProtectionFault0(pVCpu);
3655 }
3656
3657 /* Fetch the descriptor for the new CS. */
3658 IEMSELDESC DescCS;
3659 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3660 if (rcStrict != VINF_SUCCESS)
3661 {
3662 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /* Must be a 64-bit code segment. */
3667 if (!DescCS.Long.Gen.u1DescType)
3668 {
3669 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3670 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3671 }
3672 if ( !DescCS.Long.Gen.u1Long
3673 || DescCS.Long.Gen.u1DefBig
3674 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3677 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3678 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3679 }
3680
3681 /* Don't allow lowering the privilege level. For non-conforming CS
3682 selectors, the CS.DPL sets the privilege level the trap/interrupt
3683 handler runs at. For conforming CS selectors, the CPL remains
3684 unchanged, but the CS.DPL must be <= CPL. */
3685 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3686 * when CPU in Ring-0. Result \#GP? */
3687 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3688 {
3689 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3690 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3691 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3692 }
3693
3694
3695 /* Make sure the selector is present. */
3696 if (!DescCS.Legacy.Gen.u1Present)
3697 {
3698 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3699 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3700 }
3701
3702 /* Check that the new RIP is canonical. */
3703 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3704 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3705 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3706 if (!IEM_IS_CANONICAL(uNewRip))
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3709 return iemRaiseGeneralProtectionFault0(pVCpu);
3710 }
3711
3712 /*
3713 * If the privilege level changes or if the IST isn't zero, we need to get
3714 * a new stack from the TSS.
3715 */
3716 uint64_t uNewRsp;
3717 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3718 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3719 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3720 || Idte.Gate.u3IST != 0)
3721 {
3722 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3723 if (rcStrict != VINF_SUCCESS)
3724 return rcStrict;
3725 }
3726 else
3727 uNewRsp = pVCpu->cpum.GstCtx.rsp;
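 /* In 64-bit mode the CPU aligns the stack pointer down to a 16 byte boundary before pushing the frame. */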
3728 uNewRsp &= ~(uint64_t)0xf;
3729
3730 /*
3731 * Calc the flag image to push.
3732 */
3733 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3734 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3735 fEfl &= ~X86_EFL_RF;
3736 else
3737 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3738
3739 /*
3740 * Start making changes.
3741 */
3742 /* Set the new CPL so that stack accesses use it. */
3743 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3744 IEM_SET_CPL(pVCpu, uNewCpl);
3745/** @todo Setting CPL this early seems wrong as it would affect any errors we
3746 * raise accessing the stack and (?) GDT/LDT... */
3747
3748 /* Create the stack frame. */
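 /* 64-bit mode always pushes five qwords: RIP, CS, RFLAGS, RSP and SS; an error code makes it six. */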
3749 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3750 RTPTRUNION uStackFrame;
3751 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3752 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 void * const pvStackFrame = uStackFrame.pv;
3756
3757 if (fFlags & IEM_XCPT_FLAGS_ERR)
3758 *uStackFrame.pu64++ = uErr;
3759 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3760 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3761 uStackFrame.pu64[2] = fEfl;
3762 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3763 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3764 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3765 if (rcStrict != VINF_SUCCESS)
3766 return rcStrict;
3767
3768 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
3769 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3770 * after pushing the stack frame? (Write protect the gdt + stack to
3771 * find out.) */
3772 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3773 {
3774 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3778 }
3779
3780 /*
3781 * Start committing the register changes.
3782 */
3783 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3784 * hidden registers when interrupting 32-bit or 16-bit code! */
3785 if (uNewCpl != uOldCpl)
3786 {
3787 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3788 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3789 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3790 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3791 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3792 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3793 }
3794 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3795 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3796 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3797 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3798 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3799 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3800 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3801 pVCpu->cpum.GstCtx.rip = uNewRip;
3802
3803 fEfl &= ~fEflToClear;
3804 IEMMISC_SET_EFL(pVCpu, fEfl);
3805
3806 if (fFlags & IEM_XCPT_FLAGS_CR2)
3807 pVCpu->cpum.GstCtx.cr2 = uCr2;
3808
3809 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3810 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3811
3812 iemRecalcExecModeAndCplFlags(pVCpu);
3813
3814 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3815}
3816
3817
3818/**
3819 * Implements exceptions and interrupts.
3820 *
3821 * All exceptions and interrupts go through this function!
3822 *
3823 * @returns VBox strict status code.
3824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3825 * @param cbInstr The number of bytes to offset rIP by in the return
3826 * address.
3827 * @param u8Vector The interrupt / exception vector number.
3828 * @param fFlags The flags.
3829 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3830 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
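 * @note  Callers normally go through the convenience wrappers further down; e.g. a
 *        \#GP(0) boils down to iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
 *        IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0).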
3831 */
3832VBOXSTRICTRC
3833iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3834 uint8_t cbInstr,
3835 uint8_t u8Vector,
3836 uint32_t fFlags,
3837 uint16_t uErr,
3838 uint64_t uCr2) RT_NOEXCEPT
3839{
3840 /*
3841 * Get all the state that we might need here.
3842 */
3843 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3844 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3845
3846#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3847 /*
3848 * Flush prefetch buffer
3849 */
3850 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3851#endif
3852
3853 /*
3854 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3855 */
3856 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3857 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3858 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3859 | IEM_XCPT_FLAGS_BP_INSTR
3860 | IEM_XCPT_FLAGS_ICEBP_INSTR
3861 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3862 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3863 {
3864 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3865 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3866 u8Vector = X86_XCPT_GP;
3867 uErr = 0;
3868 }
3869
3870 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3871#ifdef DBGFTRACE_ENABLED
3872 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3873 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3874 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3875#endif
3876
3877 /*
3878 * Check if DBGF wants to intercept the exception.
3879 */
3880 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3881 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3882 { /* likely */ }
3883 else
3884 {
3885 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3886 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3887 if (rcStrict != VINF_SUCCESS)
3888 return rcStrict;
3889 }
3890
3891 /*
3892 * Evaluate whether NMI blocking should be in effect.
3893 * Normally, NMI blocking is in effect whenever we inject an NMI.
3894 */
3895 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3896 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3897
3898#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3899 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3900 {
3901 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3902 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3903 return rcStrict0;
3904
3905 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3906 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3907 {
3908 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3909 fBlockNmi = false;
3910 }
3911 }
3912#endif
3913
3914#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3915 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3916 {
3917 /*
3918 * If the event is being injected as part of VMRUN, it isn't subject to event
3919 * intercepts in the nested-guest. However, secondary exceptions that occur
3920 * during injection of any event -are- subject to exception intercepts.
3921 *
3922 * See AMD spec. 15.20 "Event Injection".
3923 */
3924 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3925 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3926 else
3927 {
3928 /*
3929 * Check and handle if the event being raised is intercepted.
3930 */
3931 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3932 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3933 return rcStrict0;
3934 }
3935 }
3936#endif
3937
3938 /*
3939 * Set NMI blocking if necessary.
3940 */
3941 if (fBlockNmi)
3942 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3943
3944 /*
3945 * Do recursion accounting.
3946 */
3947 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3948 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3949 if (pVCpu->iem.s.cXcptRecursions == 0)
3950 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3951 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3952 else
3953 {
3954 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3955 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3956 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3957
3958 if (pVCpu->iem.s.cXcptRecursions >= 4)
3959 {
3960#ifdef DEBUG_bird
3961 AssertFailed();
3962#endif
3963 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3964 }
3965
3966 /*
3967 * Evaluate the sequence of recurring events.
3968 */
3969 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3970 NULL /* pXcptRaiseInfo */);
3971 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3972 { /* likely */ }
3973 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3974 {
3975 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3976 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3977 u8Vector = X86_XCPT_DF;
3978 uErr = 0;
3979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3980 /* VMX nested-guest #DF intercept needs to be checked here. */
3981 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3982 {
3983 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3984 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3985 return rcStrict0;
3986 }
3987#endif
3988 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3989 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3990 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3991 }
3992 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3993 {
3994 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3995 return iemInitiateCpuShutdown(pVCpu);
3996 }
3997 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3998 {
3999 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4000 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4001 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4002 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4003 return VERR_EM_GUEST_CPU_HANG;
4004 }
4005 else
4006 {
4007 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4008 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4009 return VERR_IEM_IPE_9;
4010 }
4011
4012 /*
4013 * The 'EXT' bit is set when an exception occurs during delivery of an external
4014 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4015 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
4016 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4017 *
4018 * [1] - Intel spec. 6.13 "Error Code"
4019 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4020 * [3] - Intel Instruction reference for INT n.
4021 */
4022 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4023 && (fFlags & IEM_XCPT_FLAGS_ERR)
4024 && u8Vector != X86_XCPT_PF
4025 && u8Vector != X86_XCPT_DF)
4026 {
4027 uErr |= X86_TRAP_ERR_EXTERNAL;
4028 }
4029 }
4030
4031 pVCpu->iem.s.cXcptRecursions++;
4032 pVCpu->iem.s.uCurXcpt = u8Vector;
4033 pVCpu->iem.s.fCurXcpt = fFlags;
4034 pVCpu->iem.s.uCurXcptErr = uErr;
4035 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4036
4037 /*
4038 * Extensive logging.
4039 */
4040#if defined(LOG_ENABLED) && defined(IN_RING3)
4041 if (LogIs3Enabled())
4042 {
4043 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4044 char szRegs[4096];
4045 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4046 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4047 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4048 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4049 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4050 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4051 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4052 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4053 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4054 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4055 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4056 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4057 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4058 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4059 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4060 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4061 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4062 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4063 " efer=%016VR{efer}\n"
4064 " pat=%016VR{pat}\n"
4065 " sf_mask=%016VR{sf_mask}\n"
4066 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4067 " lstar=%016VR{lstar}\n"
4068 " star=%016VR{star} cstar=%016VR{cstar}\n"
4069 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4070 );
4071
4072 char szInstr[256];
4073 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4074 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4075 szInstr, sizeof(szInstr), NULL);
4076 Log3(("%s%s\n", szRegs, szInstr));
4077 }
4078#endif /* LOG_ENABLED */
4079
4080 /*
4081 * Stats.
4082 */
4083 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4084 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4085 else if (u8Vector <= X86_XCPT_LAST)
4086 {
4087 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4088 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4089 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4090 }
4091
4092 /*
4093 * A #PF implies an INVLPG of the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4094 * to ensure that a stale TLB or paging-cache entry will only cause one
4095 * spurious #PF.
4096 */
4097 if ( u8Vector == X86_XCPT_PF
4098 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4099 IEMTlbInvalidatePage(pVCpu, uCr2);
4100
4101 /*
4102 * Call the mode specific worker function.
4103 */
4104 VBOXSTRICTRC rcStrict;
4105 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4106 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4107 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4108 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4109 else
4110 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4111
4112 /* Flush the prefetch buffer. */
4113 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4114
4115 /*
4116 * Unwind.
4117 */
4118 pVCpu->iem.s.cXcptRecursions--;
4119 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4120 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4121 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4122 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4123 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4124 return rcStrict;
4125}
4126
4127#ifdef IEM_WITH_SETJMP
4128/**
4129 * See iemRaiseXcptOrInt. Will not return.
4130 */
4131DECL_NO_RETURN(void)
4132iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4133 uint8_t cbInstr,
4134 uint8_t u8Vector,
4135 uint32_t fFlags,
4136 uint16_t uErr,
4137 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4138{
4139 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4140 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4141}
4142#endif
4143
4144
4145/** \#DE - 00. */
4146VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4147{
4148 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4149}
4150
4151
4152/** \#DB - 01.
4153 * @note This automatically clears DR7.GD. */
4154VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4155{
4156 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4157 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4159}
4160
4161
4162/** \#BR - 05. */
4163VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4164{
4165 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4166}
4167
4168
4169/** \#UD - 06. */
4170VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4173}
4174
4175
4176/** \#NM - 07. */
4177VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4178{
4179 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4180}
4181
4182
4183/** \#TS(err) - 0a. */
4184VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4185{
4186 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4187}
4188
4189
4190/** \#TS(tr) - 0a. */
4191VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4192{
4193 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4194 pVCpu->cpum.GstCtx.tr.Sel, 0);
4195}
4196
4197
4198/** \#TS(0) - 0a. */
4199VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4200{
4201 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4202 0, 0);
4203}
4204
4205
4206/** \#TS(sel) - 0a. */
4207VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4210 uSel & X86_SEL_MASK_OFF_RPL, 0);
4211}
4212
4213
4214/** \#NP(err) - 0b. */
4215VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4218}
4219
4220
4221/** \#NP(sel) - 0b. */
4222VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4223{
4224 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4225 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4226 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4227 uSel & ~X86_SEL_RPL, 0);
4228}
4229
4230
4231/** \#SS(seg) - 0c. */
4232VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4233{
4234 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4237 uSel & ~X86_SEL_RPL, 0);
4238}
4239
4240
4241/** \#SS(err) - 0c. */
4242VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4243{
4244 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4246 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4247}
4248
4249
4250/** \#GP(n) - 0d. */
4251VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4252{
4253 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4254 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4255}
4256
4257
4258/** \#GP(0) - 0d. */
4259VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4260{
4261 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4262 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4263}
4264
4265#ifdef IEM_WITH_SETJMP
4266/** \#GP(0) - 0d. */
4267DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4268{
4269 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4270 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4271}
4272#endif
4273
4274
4275/** \#GP(sel) - 0d. */
4276VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4277{
4278 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4279 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4280 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4281 Sel & ~X86_SEL_RPL, 0);
4282}
4283
4284
4285/** \#GP(0) - 0d. */
4286VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4287{
4288 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4289 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4290}
4291
4292
4293/** \#GP(sel) - 0d. */
4294VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4295{
4296 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4297 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4298 NOREF(iSegReg); NOREF(fAccess);
4299 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4300 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4301}
4302
4303#ifdef IEM_WITH_SETJMP
4304/** \#GP(sel) - 0d, longjmp. */
4305DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4306{
4307 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4308 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4309 NOREF(iSegReg); NOREF(fAccess);
4310 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4311 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4312}
4313#endif
4314
4315/** \#GP(sel) - 0d. */
4316VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4317{
4318 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4319 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4320 NOREF(Sel);
4321 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4322}
4323
4324#ifdef IEM_WITH_SETJMP
4325/** \#GP(sel) - 0d, longjmp. */
4326DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4327{
4328 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4329 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4330 NOREF(Sel);
4331 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4332}
4333#endif
4334
4335
4336/** \#GP(sel) - 0d. */
4337VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4338{
4339 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4340 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4341 NOREF(iSegReg); NOREF(fAccess);
4342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4343}
4344
4345#ifdef IEM_WITH_SETJMP
4346/** \#GP(sel) - 0d, longjmp. */
4347DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4348{
4349 NOREF(iSegReg); NOREF(fAccess);
4350 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4351}
4352#endif
4353
4354
4355/** \#PF(n) - 0e. */
4356VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4357{
4358 uint16_t uErr;
4359 switch (rc)
4360 {
4361 case VERR_PAGE_NOT_PRESENT:
4362 case VERR_PAGE_TABLE_NOT_PRESENT:
4363 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4364 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4365 uErr = 0;
4366 break;
4367
4368 default:
4369 AssertMsgFailed(("%Rrc\n", rc));
4370 RT_FALL_THRU();
4371 case VERR_ACCESS_DENIED:
4372 uErr = X86_TRAP_PF_P;
4373 break;
4374
4375 /** @todo reserved */
4376 }
4377
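 /* Fill in the remaining error code bits: US for CPL 3, I/D for NX affected
 instruction fetches, and R/W for writes; the P bit was decided above. */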
4378 if (IEM_GET_CPL(pVCpu) == 3)
4379 uErr |= X86_TRAP_PF_US;
4380
4381 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4382 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4383 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4384 uErr |= X86_TRAP_PF_ID;
4385
4386#if 0 /* This is so much nonsense, really. Why was it done like that? */
4387 /* Note! RW access callers reporting a WRITE protection fault will clear
4388 the READ flag before calling. So, read-modify-write accesses (RW)
4389 can safely be reported as READ faults. */
4390 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4391 uErr |= X86_TRAP_PF_RW;
4392#else
4393 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4394 {
4395 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4396 /// (regardless of outcome of the comparison in the latter case).
4397 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4398 uErr |= X86_TRAP_PF_RW;
4399 }
4400#endif
4401
4402 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4403 of the memory operand rather than at the start of it. (Not sure what
4404 happens if it crosses a page boundary.) The current heuristic for
4405 this is to report the #PF for the last byte if the access is more than
4406 64 bytes. This is probably not correct, but we can work that out later;
4407 the main objective now is to get FXSAVE to work like real hardware and
4408 make bs3-cpu-basic2 work. */
4409 if (cbAccess <= 64)
4410 { /* likely */ }
4411 else
4412 GCPtrWhere += cbAccess - 1;
4413
4414 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4415 uErr, GCPtrWhere);
4416}
4417
4418#ifdef IEM_WITH_SETJMP
4419/** \#PF(n) - 0e, longjmp. */
4420DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4421 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4422{
4423 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4424}
4425#endif
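
/* Illustrative sketch, compiled out: how the #PF error code assembled by
   iemRaisePageFault() decomposes into the architectural bits. The helper
   name below is made up purely for illustration and is not used anywhere. */
#if 0
static uint16_t iemExamplePageFaultErrCode(bool fPresent, bool fWrite, bool fUser, bool fInstrFetch)
{
    uint16_t uErr = fPresent ? X86_TRAP_PF_P : 0;   /* bit 0: protection violation vs. not-present page. */
    if (fWrite)
        uErr |= X86_TRAP_PF_RW;                     /* bit 1: the faulting access was a write. */
    if (fUser)
        uErr |= X86_TRAP_PF_US;                     /* bit 2: the access originated at CPL 3. */
    if (fInstrFetch)
        uErr |= X86_TRAP_PF_ID;                     /* bit 4: instruction fetch (PAE/long mode + EFER.NXE). */
    return uErr;    /* e.g. a CPL-3 write to a present read-only page yields P | RW | US = 7. */
}
#endif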
4426
4427
4428/** \#MF(0) - 10. */
4429VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4430{
4431 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4432 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4433
4434 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4435 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4436 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4437}
4438
4439
4440/** \#AC(0) - 11. */
4441VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4442{
4443 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4444}
4445
4446#ifdef IEM_WITH_SETJMP
4447/** \#AC(0) - 11, longjmp. */
4448DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4449{
4450 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4451}
4452#endif
4453
4454
4455/** \#XF(0)/\#XM(0) - 19. */
4456VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4457{
4458 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4459}
4460
4461
4462/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4463IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4464{
4465 NOREF(cbInstr);
4466 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4467}
4468
4469
4470/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4471IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4472{
4473 NOREF(cbInstr);
4474 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4475}
4476
4477
4478/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4479IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4480{
4481 NOREF(cbInstr);
4482 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4483}
4484
4485
4486/** @} */
4487
4488/** @name Common opcode decoders.
4489 * @{
4490 */
4491//#include <iprt/mem.h>
4492
4493/**
4494 * Used to add extra details about a stub case.
4495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4496 */
4497void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4498{
4499#if defined(LOG_ENABLED) && defined(IN_RING3)
4500 PVM pVM = pVCpu->CTX_SUFF(pVM);
4501 char szRegs[4096];
4502 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4503 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4504 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4505 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4506 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4507 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4508 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4509 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4510 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4511 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4512 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4513 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4514 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4515 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4516 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4517 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4518 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4519 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4520 " efer=%016VR{efer}\n"
4521 " pat=%016VR{pat}\n"
4522 " sf_mask=%016VR{sf_mask}\n"
4523 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4524 " lstar=%016VR{lstar}\n"
4525 " star=%016VR{star} cstar=%016VR{cstar}\n"
4526 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4527 );
4528
4529 char szInstr[256];
4530 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4531 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4532 szInstr, sizeof(szInstr), NULL);
4533
4534 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4535#else
4536 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4537#endif
4538}
4539
4540/** @} */
4541
4542
4543
4544/** @name Register Access.
4545 * @{
4546 */
4547
4548/**
4549 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4550 *
4551 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4552 * segment limit.
4553 *
4554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4555 * @param cbInstr Instruction size.
4556 * @param offNextInstr The offset of the next instruction.
4557 * @param enmEffOpSize Effective operand size.
4558 */
4559VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4560 IEMMODE enmEffOpSize) RT_NOEXCEPT
4561{
4562 switch (enmEffOpSize)
4563 {
4564 case IEMMODE_16BIT:
4565 {
4566 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4567 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4568 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4569 pVCpu->cpum.GstCtx.rip = uNewIp;
4570 else
4571 return iemRaiseGeneralProtectionFault0(pVCpu);
4572 break;
4573 }
4574
4575 case IEMMODE_32BIT:
4576 {
4577 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4578 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4579
4580 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4581 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4582 pVCpu->cpum.GstCtx.rip = uNewEip;
4583 else
4584 return iemRaiseGeneralProtectionFault0(pVCpu);
4585 break;
4586 }
4587
4588 case IEMMODE_64BIT:
4589 {
4590 Assert(IEM_IS_64BIT_CODE(pVCpu));
4591
4592 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4593 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4594 pVCpu->cpum.GstCtx.rip = uNewRip;
4595 else
4596 return iemRaiseGeneralProtectionFault0(pVCpu);
4597 break;
4598 }
4599
4600 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4601 }
4602
4603#ifndef IEM_WITH_CODE_TLB
4604 /* Flush the prefetch buffer. */
4605 pVCpu->iem.s.cbOpcode = cbInstr;
4606#endif
4607
4608 /*
4609 * Clear RF and finish the instruction (maybe raise #DB).
4610 */
4611 return iemRegFinishClearingRF(pVCpu);
4612}
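
/* Illustrative sketch, compiled out: the 16-bit case above relies on uint16_t
   arithmetic wrapping modulo 64K before the CS limit check, so a short jump
   near the top of the segment wraps IP around. Values are arbitrary examples. */
#if 0
static void iemExampleIpWrapAround(void)
{
    uint16_t const uIp     = UINT16_C(0xfffe);      /* instruction sits at the very top of the segment */
    uint8_t  const cbInstr = 2;                     /* a two byte JMP rel8 */
    int8_t   const offNext = 0x10;
    uint16_t const uNewIp  = uIp + cbInstr + (int16_t)offNext;  /* 0xfffe + 2 + 0x10 wraps to 0x0010 */
    Assert(uNewIp == UINT16_C(0x0010));             /* only after wrapping is uNewIp checked against cs.u32Limit */
}
#endif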
4613
4614
4615/**
4616 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4617 *
4618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4619 * segment limit.
4620 *
4621 * @returns Strict VBox status code.
4622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4623 * @param cbInstr Instruction size.
4624 * @param offNextInstr The offset of the next instruction.
4625 */
4626VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4627{
4628 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4629
4630 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4631 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4632 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4633 pVCpu->cpum.GstCtx.rip = uNewIp;
4634 else
4635 return iemRaiseGeneralProtectionFault0(pVCpu);
4636
4637#ifndef IEM_WITH_CODE_TLB
4638 /* Flush the prefetch buffer. */
4639 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4640#endif
4641
4642 /*
4643 * Clear RF and finish the instruction (maybe raise #DB).
4644 */
4645 return iemRegFinishClearingRF(pVCpu);
4646}
4647
4648
4649/**
4650 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4651 *
4652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4653 * segment limit.
4654 *
4655 * @returns Strict VBox status code.
4656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4657 * @param cbInstr Instruction size.
4658 * @param offNextInstr The offset of the next instruction.
4659 * @param enmEffOpSize Effective operand size.
4660 */
4661VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4662 IEMMODE enmEffOpSize) RT_NOEXCEPT
4663{
4664 if (enmEffOpSize == IEMMODE_32BIT)
4665 {
4666 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4667
4668 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4669 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4670 pVCpu->cpum.GstCtx.rip = uNewEip;
4671 else
4672 return iemRaiseGeneralProtectionFault0(pVCpu);
4673 }
4674 else
4675 {
4676 Assert(enmEffOpSize == IEMMODE_64BIT);
4677
4678 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4679 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4680 pVCpu->cpum.GstCtx.rip = uNewRip;
4681 else
4682 return iemRaiseGeneralProtectionFault0(pVCpu);
4683 }
4684
4685#ifndef IEM_WITH_CODE_TLB
4686 /* Flush the prefetch buffer. */
4687 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4688#endif
4689
4690 /*
4691 * Clear RF and finish the instruction (maybe raise #DB).
4692 */
4693 return iemRegFinishClearingRF(pVCpu);
4694}
4695
4696
4697/**
4698 * Performs a near jump to the specified address.
4699 *
4700 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4701 *
4702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4703 * @param uNewIp The new IP value.
4704 */
4705VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4706{
4707 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4708 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4709 pVCpu->cpum.GstCtx.rip = uNewIp;
4710 else
4711 return iemRaiseGeneralProtectionFault0(pVCpu);
4712 /** @todo Test 16-bit jump in 64-bit mode. */
4713
4714#ifndef IEM_WITH_CODE_TLB
4715 /* Flush the prefetch buffer. */
4716 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4717#endif
4718
4719 /*
4720 * Clear RF and finish the instruction (maybe raise #DB).
4721 */
4722 return iemRegFinishClearingRF(pVCpu);
4723}
4724
4725
4726/**
4727 * Performs a near jump to the specified address.
4728 *
4729 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4730 *
4731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4732 * @param uNewEip The new EIP value.
4733 */
4734VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4735{
4736 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4737 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4738
4739 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4740 pVCpu->cpum.GstCtx.rip = uNewEip;
4741 else
4742 return iemRaiseGeneralProtectionFault0(pVCpu);
4743
4744#ifndef IEM_WITH_CODE_TLB
4745 /* Flush the prefetch buffer. */
4746 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4747#endif
4748
4749 /*
4750 * Clear RF and finish the instruction (maybe raise #DB).
4751 */
4752 return iemRegFinishClearingRF(pVCpu);
4753}
4754
4755
4756/**
4757 * Performs a near jump to the specified address.
4758 *
4759 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4760 * segment limit.
4761 *
4762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4763 * @param uNewRip The new RIP value.
4764 */
4765VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4766{
4767 Assert(IEM_IS_64BIT_CODE(pVCpu));
4768
4769 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4770 pVCpu->cpum.GstCtx.rip = uNewRip;
4771 else
4772 return iemRaiseGeneralProtectionFault0(pVCpu);
4773
4774#ifndef IEM_WITH_CODE_TLB
4775 /* Flush the prefetch buffer. */
4776 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4777#endif
4778
4779 /*
4780 * Clear RF and finish the instruction (maybe raise #DB).
4781 */
4782 return iemRegFinishClearingRF(pVCpu);
4783}
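
/* Illustrative sketch, compiled out: what the IEM_IS_CANONICAL check used by
   the 64-bit jump helpers boils down to - bits 63:47 must all equal bit 47,
   i.e. the address is the sign-extension of its low 48 bits. The helper is
   hypothetical and assumes an arithmetic right shift of signed values. */
#if 0
static bool iemExampleIsCanonical(uint64_t uAddr)
{
    int64_t const iSignExtended = (int64_t)(uAddr << 16) >> 16;    /* replicate bit 47 into bits 63:48 */
    return (uint64_t)iSignExtended == uAddr;    /* 0x00007fffffffffff is canonical, 0x0000800000000000 is not */
}
#endif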
4784
4785/** @} */
4786
4787
4788/** @name FPU access and helpers.
4789 *
4790 * @{
4791 */
4792
4793/**
4794 * Updates the x87.DS and FPUDP registers.
4795 *
4796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4797 * @param pFpuCtx The FPU context.
4798 * @param iEffSeg The effective segment register.
4799 * @param GCPtrEff The effective address relative to @a iEffSeg.
4800 */
4801DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4802{
4803 RTSEL sel;
4804 switch (iEffSeg)
4805 {
4806 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4807 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4808 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4809 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4810 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4811 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4812 default:
4813 AssertMsgFailed(("%d\n", iEffSeg));
4814 sel = pVCpu->cpum.GstCtx.ds.Sel;
4815 }
4816 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4817 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4818 {
4819 pFpuCtx->DS = 0;
4820 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4821 }
4822 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4823 {
4824 pFpuCtx->DS = sel;
4825 pFpuCtx->FPUDP = GCPtrEff;
4826 }
4827 else
4828 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4829}
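
/* Illustrative sketch, compiled out: the real/V86-mode branch above records
   the 32-bit linear data pointer (selector * 16 + offset) in FPUDP and zeroes
   the DS field, while protected mode records selector and offset separately.
   The numbers below are arbitrary examples. */
#if 0
static void iemExampleFpuDpRealMode(void)
{
    RTSEL    const sel    = 0x1234;
    uint32_t const offEff = UINT32_C(0x0056);
    uint32_t const uFpuDp = offEff + ((uint32_t)sel << 4);  /* 0x12340 + 0x56 */
    Assert(uFpuDp == UINT32_C(0x12396));
}
#endif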
4830
4831
4832/**
4833 * Rotates the stack registers in the push direction.
4834 *
4835 * @param pFpuCtx The FPU context.
4836 * @remarks This is a complete waste of time, but fxsave stores the registers in
4837 * stack order.
4838 */
4839DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4840{
4841 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4842 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4843 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4844 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4845 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4846 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4847 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4848 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4849 pFpuCtx->aRegs[0].r80 = r80Tmp;
4850}
4851
4852
4853/**
4854 * Rotates the stack registers in the pop direction.
4855 *
4856 * @param pFpuCtx The FPU context.
4857 * @remarks This is a complete waste of time, but fxsave stores the registers in
4858 * stack order.
4859 */
4860DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4861{
4862 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4863 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4864 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4865 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4866 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4867 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4868 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4869 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4870 pFpuCtx->aRegs[7].r80 = r80Tmp;
4871}
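
/* Illustrative sketch, compiled out: the unrolled push rotation above is
   equivalent to this loop form. Rotating keeps aRegs[i] corresponding to
   ST(i) for the new TOP; callers store the pushed value in aRegs[7] before
   rotating, so it ends up in the new ST(0). Hypothetical helper only. */
#if 0
static void iemExampleFpuRotateStackPushLoop(PX86FXSTATE pFpuCtx)
{
    RTFLOAT80U const r80Tmp = pFpuCtx->aRegs[7].r80;
    for (unsigned i = 7; i > 0; i--)
        pFpuCtx->aRegs[i].r80 = pFpuCtx->aRegs[i - 1].r80;  /* old ST(i-1) becomes the new ST(i) */
    pFpuCtx->aRegs[0].r80 = r80Tmp;                         /* old aRegs[7] (the pushed value) becomes ST(0) */
}
#endif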
4872
4873
4874/**
4875 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4876 * exception prevents it.
4877 *
4878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4879 * @param pResult The FPU operation result to push.
4880 * @param pFpuCtx The FPU context.
4881 */
4882static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4883{
4884 /* Update FSW and bail if there are pending exceptions afterwards. */
4885 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4886 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4887 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4888 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4889 {
4890 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4891 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4892 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4893 pFpuCtx->FSW = fFsw;
4894 return;
4895 }
4896
4897 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4898 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4899 {
4900 /* All is fine, push the actual value. */
4901 pFpuCtx->FTW |= RT_BIT(iNewTop);
4902 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4903 }
4904 else if (pFpuCtx->FCW & X86_FCW_IM)
4905 {
4906 /* Masked stack overflow, push QNaN. */
4907 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4908 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4909 }
4910 else
4911 {
4912 /* Raise stack overflow, don't push anything. */
4913 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4914 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4915 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4916 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4917 return;
4918 }
4919
4920 fFsw &= ~X86_FSW_TOP_MASK;
4921 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4922 pFpuCtx->FSW = fFsw;
4923
4924 iemFpuRotateStackPush(pFpuCtx);
4925 RT_NOREF(pVCpu);
4926}
4927
4928
4929/**
4930 * Stores a result in a FPU register and updates the FSW and FTW.
4931 *
4932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4933 * @param pFpuCtx The FPU context.
4934 * @param pResult The result to store.
4935 * @param iStReg Which FPU register to store it in.
4936 */
4937static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4938{
4939 Assert(iStReg < 8);
4940 uint16_t fNewFsw = pFpuCtx->FSW;
4941 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4942 fNewFsw &= ~X86_FSW_C_MASK;
4943 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4944 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4945 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4946 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4947 pFpuCtx->FSW = fNewFsw;
4948 pFpuCtx->FTW |= RT_BIT(iReg);
4949 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4950 RT_NOREF(pVCpu);
4951}
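
/* Illustrative sketch, compiled out: the (TOP + iStReg) & 7 mapping used above
   converts a stack-relative ST(i) operand into the physical register number
   that the FTW valid-bit mask is indexed by; e.g. with TOP=6, ST(3) maps to
   physical register (6 + 3) & 7 = 1. Hypothetical helper, not used by IEM. */
#if 0
static uint16_t iemExampleStRegToPhysReg(uint16_t fFsw, uint8_t iStReg)
{
    return (X86_FSW_TOP_GET(fFsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif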
4952
4953
4954/**
4955 * Only updates the FPU status word (FSW) with the result of the current
4956 * instruction.
4957 *
4958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4959 * @param pFpuCtx The FPU context.
4960 * @param u16FSW The FSW output of the current instruction.
4961 */
4962static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4963{
4964 uint16_t fNewFsw = pFpuCtx->FSW;
4965 fNewFsw &= ~X86_FSW_C_MASK;
4966 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4967 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4968 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4969 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4970 pFpuCtx->FSW = fNewFsw;
4971 RT_NOREF(pVCpu);
4972}
4973
4974
4975/**
4976 * Pops one item off the FPU stack if no pending exception prevents it.
4977 *
4978 * @param pFpuCtx The FPU context.
4979 */
4980static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4981{
4982 /* Check pending exceptions. */
4983 uint16_t uFSW = pFpuCtx->FSW;
4984 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4985 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4986 return;
4987
4988 /* TOP--. */
4989 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4990 uFSW &= ~X86_FSW_TOP_MASK;
4991 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4992 pFpuCtx->FSW = uFSW;
4993
4994 /* Mark the previous ST0 as empty. */
4995 iOldTop >>= X86_FSW_TOP_SHIFT;
4996 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4997
4998 /* Rotate the registers. */
4999 iemFpuRotateStackPop(pFpuCtx);
5000}
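
/* Illustrative sketch, compiled out: the TOP arithmetic used by the push/pop
   helpers above - once masked to the 3-bit TOP field, adding 7 is a decrement
   (push) and adding 9 (or 1) is an increment (pop), both modulo 8. */
#if 0
static void iemExampleFpuTopArithmetic(void)
{
    for (uint16_t iTop = 0; iTop < 8; iTop++)
    {
        Assert(((iTop + 7) & X86_FSW_TOP_SMASK) == (iTop == 0 ? 7 : iTop - 1)); /* push: TOP - 1 (mod 8) */
        Assert(((iTop + 9) & X86_FSW_TOP_SMASK) == (iTop == 7 ? 0 : iTop + 1)); /* pop:  TOP + 1 (mod 8) */
    }
}
#endif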
5001
5002
5003/**
5004 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5005 *
5006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5007 * @param pResult The FPU operation result to push.
5008 * @param uFpuOpcode The FPU opcode value.
5009 */
5010void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5011{
5012 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5013 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5014 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5015}
5016
5017
5018/**
5019 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5020 * and sets FPUDP and FPUDS.
5021 *
5022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5023 * @param pResult The FPU operation result to push.
5024 * @param iEffSeg The effective segment register.
5025 * @param GCPtrEff The effective address relative to @a iEffSeg.
5026 * @param uFpuOpcode The FPU opcode value.
5027 */
5028void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5029 uint16_t uFpuOpcode) RT_NOEXCEPT
5030{
5031 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5032 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5033 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5034 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5035}
5036
5037
5038/**
5039 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5040 * unless a pending exception prevents it.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The FPU operation result to store and push.
5044 * @param uFpuOpcode The FPU opcode value.
5045 */
5046void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5047{
5048 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5049 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5050
5051 /* Update FSW and bail if there are pending exceptions afterwards. */
5052 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5053 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5054 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5055 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5056 {
5057 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5058 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5059 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5060 pFpuCtx->FSW = fFsw;
5061 return;
5062 }
5063
5064 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5065 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5066 {
5067 /* All is fine, push the actual value. */
5068 pFpuCtx->FTW |= RT_BIT(iNewTop);
5069 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5070 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5071 }
5072 else if (pFpuCtx->FCW & X86_FCW_IM)
5073 {
5074 /* Masked stack overflow, push QNaN. */
5075 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5076 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5077 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5078 }
5079 else
5080 {
5081 /* Raise stack overflow, don't push anything. */
5082 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5083 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5084 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5085 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5086 return;
5087 }
5088
5089 fFsw &= ~X86_FSW_TOP_MASK;
5090 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5091 pFpuCtx->FSW = fFsw;
5092
5093 iemFpuRotateStackPush(pFpuCtx);
5094}
5095
5096
5097/**
5098 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5099 * FOP.
5100 *
5101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5102 * @param pResult The result to store.
5103 * @param iStReg Which FPU register to store it in.
5104 * @param uFpuOpcode The FPU opcode value.
5105 */
5106void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5110 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5111}
5112
5113
5114/**
5115 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5116 * FOP, and then pops the stack.
5117 *
5118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5119 * @param pResult The result to store.
5120 * @param iStReg Which FPU register to store it in.
5121 * @param uFpuOpcode The FPU opcode value.
5122 */
5123void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5124{
5125 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5126 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5127 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5128 iemFpuMaybePopOne(pFpuCtx);
5129}
5130
5131
5132/**
5133 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5134 * FPUDP, and FPUDS.
5135 *
5136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5137 * @param pResult The result to store.
5138 * @param iStReg Which FPU register to store it in.
5139 * @param iEffSeg The effective memory operand selector register.
5140 * @param GCPtrEff The effective memory operand offset.
5141 * @param uFpuOpcode The FPU opcode value.
5142 */
5143void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5144 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5145{
5146 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5147 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5148 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5149 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5150}
5151
5152
5153/**
5154 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5155 * FPUDP, and FPUDS, and then pops the stack.
5156 *
5157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5158 * @param pResult The result to store.
5159 * @param iStReg Which FPU register to store it in.
5160 * @param iEffSeg The effective memory operand selector register.
5161 * @param GCPtrEff The effective memory operand offset.
5162 * @param uFpuOpcode The FPU opcode value.
5163 */
5164void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5165 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5166{
5167 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5168 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5169 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5170 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5171 iemFpuMaybePopOne(pFpuCtx);
5172}
5173
5174
5175/**
5176 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5177 *
5178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5179 * @param uFpuOpcode The FPU opcode value.
5180 */
5181void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5182{
5183 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5184 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5185}
5186
5187
5188/**
5189 * Updates the FSW, FOP, FPUIP, and FPUCS.
5190 *
5191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5192 * @param u16FSW The FSW from the current instruction.
5193 * @param uFpuOpcode The FPU opcode value.
5194 */
5195void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5196{
5197 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5198 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5199 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5200}
5201
5202
5203/**
5204 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5205 *
5206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5207 * @param u16FSW The FSW from the current instruction.
5208 * @param uFpuOpcode The FPU opcode value.
5209 */
5210void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5211{
5212 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5213 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5214 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5215 iemFpuMaybePopOne(pFpuCtx);
5216}
5217
5218
5219/**
5220 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5221 *
5222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5223 * @param u16FSW The FSW from the current instruction.
5224 * @param iEffSeg The effective memory operand selector register.
5225 * @param GCPtrEff The effective memory operand offset.
5226 * @param uFpuOpcode The FPU opcode value.
5227 */
5228void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5229{
5230 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5231 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5232 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5233 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5234}
5235
5236
5237/**
5238 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5239 *
5240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5241 * @param u16FSW The FSW from the current instruction.
5242 * @param uFpuOpcode The FPU opcode value.
5243 */
5244void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5245{
5246 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5247 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5248 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5249 iemFpuMaybePopOne(pFpuCtx);
5250 iemFpuMaybePopOne(pFpuCtx);
5251}
5252
5253
5254/**
5255 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5256 *
5257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5258 * @param u16FSW The FSW from the current instruction.
5259 * @param iEffSeg The effective memory operand selector register.
5260 * @param GCPtrEff The effective memory operand offset.
5261 * @param uFpuOpcode The FPU opcode value.
5262 */
5263void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5264 uint16_t uFpuOpcode) RT_NOEXCEPT
5265{
5266 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5267 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5268 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5269 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5270 iemFpuMaybePopOne(pFpuCtx);
5271}
5272
5273
5274/**
5275 * Worker routine for raising an FPU stack underflow exception.
5276 *
5277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5278 * @param pFpuCtx The FPU context.
5279 * @param iStReg The stack register being accessed.
5280 */
5281static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5282{
5283 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5284 if (pFpuCtx->FCW & X86_FCW_IM)
5285 {
5286 /* Masked underflow. */
5287 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5288 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5289 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5290 if (iStReg != UINT8_MAX)
5291 {
5292 pFpuCtx->FTW |= RT_BIT(iReg);
5293 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5294 }
5295 }
5296 else
5297 {
5298 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5299 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5300 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5301 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5302 }
5303 RT_NOREF(pVCpu);
5304}
5305
5306
5307/**
5308 * Raises a FPU stack underflow exception.
5309 *
5310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5311 * @param iStReg The destination register that should be loaded
5312 * with QNaN if \#IS is not masked. Specify
5313 * UINT8_MAX if none (like for fcom).
5314 * @param uFpuOpcode The FPU opcode value.
5315 */
5316void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5317{
5318 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5319 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5320 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5321}
5322
5323
5324void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5325{
5326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5327 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5328 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5329 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5330}
5331
5332
5333void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5334{
5335 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5336 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5337 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5338 iemFpuMaybePopOne(pFpuCtx);
5339}
5340
5341
5342void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5343 uint16_t uFpuOpcode) RT_NOEXCEPT
5344{
5345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5346 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5347 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5348 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5349 iemFpuMaybePopOne(pFpuCtx);
5350}
5351
5352
5353void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5357 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5358 iemFpuMaybePopOne(pFpuCtx);
5359 iemFpuMaybePopOne(pFpuCtx);
5360}
5361
5362
5363void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5364{
5365 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5366 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5367
5368 if (pFpuCtx->FCW & X86_FCW_IM)
5369 {
5370 /* Masked underflow - Push QNaN. */
5371 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5372 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5373 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5374 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5375 pFpuCtx->FTW |= RT_BIT(iNewTop);
5376 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5377 iemFpuRotateStackPush(pFpuCtx);
5378 }
5379 else
5380 {
5381 /* Exception pending - don't change TOP or the register stack. */
5382 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5383 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5384 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5385 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5386 }
5387}
5388
5389
5390void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5391{
5392 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5393 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5394
5395 if (pFpuCtx->FCW & X86_FCW_IM)
5396 {
5397 /* Masked underflow - Push QNaN. */
5398 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5399 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5400 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5401 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5402 pFpuCtx->FTW |= RT_BIT(iNewTop);
5403 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5404 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5405 iemFpuRotateStackPush(pFpuCtx);
5406 }
5407 else
5408 {
5409 /* Exception pending - don't change TOP or the register stack. */
5410 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5411 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5412 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5413 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5414 }
5415}
5416
5417
5418/**
5419 * Worker routine for raising an FPU stack overflow exception on a push.
5420 *
5421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5422 * @param pFpuCtx The FPU context.
5423 */
5424static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5425{
5426 if (pFpuCtx->FCW & X86_FCW_IM)
5427 {
5428 /* Masked overflow. */
5429 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5430 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5431 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5432 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5433 pFpuCtx->FTW |= RT_BIT(iNewTop);
5434 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5435 iemFpuRotateStackPush(pFpuCtx);
5436 }
5437 else
5438 {
5439 /* Exception pending - don't change TOP or the register stack. */
5440 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5441 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5442 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5443 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5444 }
5445 RT_NOREF(pVCpu);
5446}
5447
5448
5449/**
5450 * Raises a FPU stack overflow exception on a push.
5451 *
5452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5453 * @param uFpuOpcode The FPU opcode value.
5454 */
5455void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5456{
5457 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5458 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5459 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5460}
5461
5462
5463/**
5464 * Raises a FPU stack overflow exception on a push with a memory operand.
5465 *
5466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5467 * @param iEffSeg The effective memory operand selector register.
5468 * @param GCPtrEff The effective memory operand offset.
5469 * @param uFpuOpcode The FPU opcode value.
5470 */
5471void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5472{
5473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5474 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5475 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5476 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5477}
5478
5479/** @} */
5480
5481
5482/** @name SSE+AVX SIMD access and helpers.
5483 *
5484 * @{
5485 */
5486/**
5487 * Stores a result in a SIMD XMM register, updates the MXCSR.
5488 *
5489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5490 * @param pResult The result to store.
5491 * @param iXmmReg Which SIMD XMM register to store the result in.
5492 */
5493void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5494{
5495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5496 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5497
5498 /* The result is only updated if there is no unmasked exception pending. */
5499 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5500 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5501 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5502}
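
/* Illustrative sketch, compiled out: the unmasked-exception test above. MXCSR
   keeps the exception flags in bits 0..5 and the corresponding mask bits in
   bits 7..12, so shifting the mask field down lines it up with the flags.
   Hypothetical helper, not used by IEM. */
#if 0
static bool iemExampleSseHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fXcptFlags = fMxcsr & X86_MXCSR_XCPT_FLAGS;                              /* IE, DE, ZE, OE, UE, PE */
    uint32_t const fXcptMasks = (fMxcsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT;
    return (fXcptFlags & ~fXcptMasks) != 0;     /* a flag set while its mask bit is clear means an unmasked exception */
}
#endif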
5503
5504
5505/**
5506 * Updates the MXCSR.
5507 *
5508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5509 * @param fMxcsr The new MXCSR value.
5510 */
5511void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5512{
5513 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5514 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5515}
5516/** @} */
5517
5518
5519/** @name Memory access.
5520 *
5521 * @{
5522 */
5523
5524#undef LOG_GROUP
5525#define LOG_GROUP LOG_GROUP_IEM_MEM
5526
5527/**
5528 * Updates the IEMCPU::cbWritten counter if applicable.
5529 *
5530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5531 * @param fAccess The access being accounted for.
5532 * @param cbMem The access size.
5533 */
5534DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5535{
5536 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5537 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5538 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5539}
5540
5541
5542/**
5543 * Applies the segment limit, base and attributes.
5544 *
5545 * This may raise a \#GP or \#SS.
5546 *
5547 * @returns VBox strict status code.
5548 *
5549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5550 * @param fAccess The kind of access which is being performed.
5551 * @param iSegReg The index of the segment register to apply.
5552 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5553 * TSS, ++).
5554 * @param cbMem The access size.
5555 * @param pGCPtrMem Pointer to the guest memory address to apply
5556 * segmentation to. Input and output parameter.
5557 */
5558VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5559{
5560 if (iSegReg == UINT8_MAX)
5561 return VINF_SUCCESS;
5562
5563 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5564 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5565 switch (IEM_GET_CPU_MODE(pVCpu))
5566 {
5567 case IEMMODE_16BIT:
5568 case IEMMODE_32BIT:
5569 {
5570 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5571 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5572
5573 if ( pSel->Attr.n.u1Present
5574 && !pSel->Attr.n.u1Unusable)
5575 {
5576 Assert(pSel->Attr.n.u1DescType);
5577 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5578 {
5579 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5580 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5581 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5582
5583 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5584 {
5585 /** @todo CPL check. */
5586 }
5587
5588 /*
5589 * There are two kinds of data selectors, normal and expand down.
5590 */
5591 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5592 {
5593 if ( GCPtrFirst32 > pSel->u32Limit
5594 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5595 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5596 }
5597 else
5598 {
5599 /*
5600 * The upper boundary is defined by the B bit, not the G bit!
5601 */
5602 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5603 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5604 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5605 }
5606 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5607 }
5608 else
5609 {
5610 /*
5611 * Code selectors can usually be used to read through; writing is
5612 * only permitted in real and V8086 mode.
5613 */
5614 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5615 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5616 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5617 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5618 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5619
5620 if ( GCPtrFirst32 > pSel->u32Limit
5621 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5622 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5623
5624 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5625 {
5626 /** @todo CPL check. */
5627 }
5628
5629 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5630 }
5631 }
5632 else
5633 return iemRaiseGeneralProtectionFault0(pVCpu);
5634 return VINF_SUCCESS;
5635 }
5636
5637 case IEMMODE_64BIT:
5638 {
5639 RTGCPTR GCPtrMem = *pGCPtrMem;
5640 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5641 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5642
5643 Assert(cbMem >= 1);
5644 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5645 return VINF_SUCCESS;
5646 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5647 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5648 return iemRaiseGeneralProtectionFault0(pVCpu);
5649 }
5650
5651 default:
5652 AssertFailedReturn(VERR_IEM_IPE_7);
5653 }
5654}
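
/* Illustrative sketch, compiled out: the two data-segment range checks done in
   the 16/32-bit case above. A normal segment accepts offsets in [0, u32Limit];
   an expand-down segment accepts (u32Limit, 0xffff] (or up to 0xffffffff when
   the B/DefBig bit is set). Hypothetical helper, not used by IEM. */
#if 0
static bool iemExampleDataSegRangeOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;             /* normal: both ends within the limit */
    uint32_t const offUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= offUpper;                /* expand-down: strictly above the limit */
}
#endif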
5655
5656
5657/**
5658 * Translates a virtual address to a physical address and checks if we
5659 * can access the page as specified.
5660 *
5661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5662 * @param GCPtrMem The virtual address.
5663 * @param cbAccess The access size, for raising \#PF correctly for
5664 * FXSAVE and such.
5665 * @param fAccess The intended access.
5666 * @param pGCPhysMem Where to return the physical address.
5667 */
5668VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5669 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5670{
5671 /** @todo Need a different PGM interface here. We're currently using
5672 * generic / REM interfaces. This won't cut it for R0. */
5673 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5674 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5675 * here. */
5676 PGMPTWALK Walk;
5677 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5678 if (RT_FAILURE(rc))
5679 {
5680 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5681 /** @todo Check unassigned memory in unpaged mode. */
5682 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5683#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5684 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5685 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5686#endif
5687 *pGCPhysMem = NIL_RTGCPHYS;
5688 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5689 }
5690
5691 /* If the page is writable and does not have the no-exec bit set, all
5692 access is allowed. Otherwise we'll have to check more carefully... */
5693 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5694 {
5695 /* Write to read only memory? */
5696 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5697 && !(Walk.fEffective & X86_PTE_RW)
5698 && ( ( IEM_GET_CPL(pVCpu) == 3
5699 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5700 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5701 {
5702 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5703 *pGCPhysMem = NIL_RTGCPHYS;
5704#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5705 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5706 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5707#endif
5708 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5709 }
5710
5711 /* Kernel memory accessed by userland? */
5712 if ( !(Walk.fEffective & X86_PTE_US)
5713 && IEM_GET_CPL(pVCpu) == 3
5714 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5715 {
5716 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5717 *pGCPhysMem = NIL_RTGCPHYS;
5718#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5719 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5720 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5721#endif
5722 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5723 }
5724
5725 /* Executing non-executable memory? */
5726 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5727 && (Walk.fEffective & X86_PTE_PAE_NX)
5728 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5729 {
5730 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5731 *pGCPhysMem = NIL_RTGCPHYS;
5732#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5733 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5734 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5735#endif
5736 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5737 VERR_ACCESS_DENIED);
5738 }
5739 }
5740
5741 /*
5742 * Set the dirty / access flags.
5743 * ASSUMES this is set when the address is translated rather than on commit...
5744 */
5745 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5746 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5747 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5748 {
5749 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5750 AssertRC(rc2);
5751 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5752 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5753 }
5754
5755 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5756 *pGCPhysMem = GCPhys;
5757 return VINF_SUCCESS;
5758}
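
/* Illustrative sketch, compiled out: the permission checks applied above once
   the page has been walked. Supervisor writes to read-only pages only fault
   when CR0.WP is set, user (CPL 3, non-system) accesses always honour R/W and
   U/S, and fetches additionally honour NX when EFER.NXE is set. Hypothetical
   helper, not used by IEM. */
#if 0
static bool iemExamplePagePermitsAccess(uint64_t fPteEffective, bool fWrite, bool fExec, bool fUser,
                                        bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !(fPteEffective & X86_PTE_RW) && (fUser || fCr0Wp))
        return false;                                   /* write to a read-only page */
    if (fUser && !(fPteEffective & X86_PTE_US))
        return false;                                   /* user-mode access to a supervisor page */
    if (fExec && (fPteEffective & X86_PTE_PAE_NX) && fEferNxe)
        return false;                                   /* instruction fetch from a no-execute page */
    return true;
}
#endif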
5759
5760
5761/**
5762 * Looks up a memory mapping entry.
5763 *
5764 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5766 * @param pvMem The memory address.
5767 * @param fAccess The access type and purpose to match (IEM_ACCESS_XXX).
5768 */
5769DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5770{
5771 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5772 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5773 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5774 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5775 return 0;
5776 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5777 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5778 return 1;
5779 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5780 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5781 return 2;
5782 return VERR_NOT_FOUND;
5783}
5784
5785
5786/**
5787 * Finds a free memmap entry when using iNextMapping doesn't work.
5788 *
5789 * @returns Memory mapping index, 1024 on failure.
5790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5791 */
5792static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5793{
5794 /*
5795 * The easy case.
5796 */
5797 if (pVCpu->iem.s.cActiveMappings == 0)
5798 {
5799 pVCpu->iem.s.iNextMapping = 1;
5800 return 0;
5801 }
5802
5803 /* There should be enough mappings for all instructions. */
5804 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5805
5806 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5807 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5808 return i;
5809
5810 AssertFailedReturn(1024);
5811}
5812
5813
5814/**
5815 * Commits a bounce buffer that needs writing back and unmaps it.
5816 *
5817 * @returns Strict VBox status code.
5818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5819 * @param iMemMap The index of the buffer to commit.
5820 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5821 * Always false in ring-3, obviously.
5822 */
5823static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5824{
5825 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5826 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5827#ifdef IN_RING3
5828 Assert(!fPostponeFail);
5829 RT_NOREF_PV(fPostponeFail);
5830#endif
5831
5832 /*
5833 * Do the writing.
5834 */
5835 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5836 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5837 {
5838 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5839 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5840 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5841 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5842 {
5843 /*
5844 * Carefully and efficiently dealing with access handler return
5845 * codes makes this a little bloated.
5846 */
5847 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5849 pbBuf,
5850 cbFirst,
5851 PGMACCESSORIGIN_IEM);
5852 if (rcStrict == VINF_SUCCESS)
5853 {
5854 if (cbSecond)
5855 {
5856 rcStrict = PGMPhysWrite(pVM,
5857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5858 pbBuf + cbFirst,
5859 cbSecond,
5860 PGMACCESSORIGIN_IEM);
5861 if (rcStrict == VINF_SUCCESS)
5862 { /* nothing */ }
5863 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5864 {
5865 LogEx(LOG_GROUP_IEM,
5866 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5869 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5870 }
5871#ifndef IN_RING3
5872 else if (fPostponeFail)
5873 {
5874 LogEx(LOG_GROUP_IEM,
5875 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5878 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5879 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5880 return iemSetPassUpStatus(pVCpu, rcStrict);
5881 }
5882#endif
5883 else
5884 {
5885 LogEx(LOG_GROUP_IEM,
5886 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5889 return rcStrict;
5890 }
5891 }
5892 }
5893 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5894 {
5895 if (!cbSecond)
5896 {
5897 LogEx(LOG_GROUP_IEM,
5898 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5899 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5900 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5901 }
5902 else
5903 {
5904 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5906 pbBuf + cbFirst,
5907 cbSecond,
5908 PGMACCESSORIGIN_IEM);
5909 if (rcStrict2 == VINF_SUCCESS)
5910 {
5911 LogEx(LOG_GROUP_IEM,
5912 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5913 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5914 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5915 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5916 }
5917 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5918 {
5919 LogEx(LOG_GROUP_IEM,
5920 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5923 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5924 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5925 }
5926#ifndef IN_RING3
5927 else if (fPostponeFail)
5928 {
5929 LogEx(LOG_GROUP_IEM,
5930 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5933 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5934 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5935 return iemSetPassUpStatus(pVCpu, rcStrict);
5936 }
5937#endif
5938 else
5939 {
5940 LogEx(LOG_GROUP_IEM,
5941 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5943 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5944 return rcStrict2;
5945 }
5946 }
5947 }
5948#ifndef IN_RING3
5949 else if (fPostponeFail)
5950 {
5951 LogEx(LOG_GROUP_IEM,
5952 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5954 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5955 if (!cbSecond)
5956 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5957 else
5958 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5959 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5960 return iemSetPassUpStatus(pVCpu, rcStrict);
5961 }
5962#endif
5963 else
5964 {
5965 LogEx(LOG_GROUP_IEM,
5966 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5967 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5968 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5969 return rcStrict;
5970 }
5971 }
5972 else
5973 {
5974 /*
5975 * No access handlers, much simpler.
5976 */
5977 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5978 if (RT_SUCCESS(rc))
5979 {
5980 if (cbSecond)
5981 {
5982 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5983 if (RT_SUCCESS(rc))
5984 { /* likely */ }
5985 else
5986 {
5987 LogEx(LOG_GROUP_IEM,
5988 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5989 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5990 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5991 return rc;
5992 }
5993 }
5994 }
5995 else
5996 {
5997 LogEx(LOG_GROUP_IEM,
5998 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5999 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6000 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6001 return rc;
6002 }
6003 }
6004 }
6005
6006#if defined(IEM_LOG_MEMORY_WRITES)
6007 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6008 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6009 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6010 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6011 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6012 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6013
6014 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6015 g_cbIemWrote = cbWrote;
6016 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6017#endif
6018
6019 /*
6020 * Free the mapping entry.
6021 */
6022 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6023 Assert(pVCpu->iem.s.cActiveMappings != 0);
6024 pVCpu->iem.s.cActiveMappings--;
6025 return VINF_SUCCESS;
6026}
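/*
 * Note on the postponement path above: when fPostponeFail is given outside
 * ring-3 and PGMPhysWrite cannot complete, the failing half (or halves) is
 * flagged with IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND and VMCPU_FF_IEM is set,
 * so the instruction can retire now while ring-3 performs the postponed
 * bounce buffer write(s) afterwards.
 */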
6027
6028
6029/**
6030 * iemMemMap worker that deals with a request crossing pages.
6031 */
6032static VBOXSTRICTRC
6033iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6034{
6035 Assert(cbMem <= GUEST_PAGE_SIZE);
6036
6037 /*
6038 * Do the address translations.
6039 */
6040 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6041 RTGCPHYS GCPhysFirst;
6042 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6043 if (rcStrict != VINF_SUCCESS)
6044 return rcStrict;
6045 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6046
6047 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6048 RTGCPHYS GCPhysSecond;
6049 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6050 cbSecondPage, fAccess, &GCPhysSecond);
6051 if (rcStrict != VINF_SUCCESS)
6052 return rcStrict;
6053 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6054 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6055
6056 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6057
6058 /*
6059 * Read in the current memory content if it's a read, execute or partial
6060 * write access.
6061 */
6062 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6063
6064 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6065 {
6066 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6067 {
6068 /*
6069 * Must carefully deal with access handler status codes here,
6070 * makes the code a bit bloated.
6071 */
6072 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6073 if (rcStrict == VINF_SUCCESS)
6074 {
6075 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6076 if (rcStrict == VINF_SUCCESS)
6077 { /*likely */ }
6078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6080 else
6081 {
6082                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6083 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6084 return rcStrict;
6085 }
6086 }
6087 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6088 {
6089 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6090 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6091 {
6092 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6093 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6094 }
6095 else
6096 {
6097 LogEx(LOG_GROUP_IEM,
6098                           ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6099                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6100 return rcStrict2;
6101 }
6102 }
6103 else
6104 {
6105                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6106 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6107 return rcStrict;
6108 }
6109 }
6110 else
6111 {
6112 /*
6113             * No informational status codes here, much more straightforward.
6114 */
6115 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6116 if (RT_SUCCESS(rc))
6117 {
6118 Assert(rc == VINF_SUCCESS);
6119 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6120 if (RT_SUCCESS(rc))
6121 Assert(rc == VINF_SUCCESS);
6122 else
6123 {
6124 LogEx(LOG_GROUP_IEM,
6125                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6126 return rc;
6127 }
6128 }
6129 else
6130 {
6131 LogEx(LOG_GROUP_IEM,
6132                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6133 return rc;
6134 }
6135 }
6136 }
6137#ifdef VBOX_STRICT
6138 else
6139 memset(pbBuf, 0xcc, cbMem);
6140 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6141 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6142#endif
6143 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6144
6145 /*
6146 * Commit the bounce buffer entry.
6147 */
6148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6150 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6151 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6152 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6153 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6154 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6155 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6156 pVCpu->iem.s.cActiveMappings++;
6157
6158 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6159 *ppvMem = pbBuf;
6160 return VINF_SUCCESS;
6161}
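/*
 * Worked example for the split above, assuming 4 KiB guest pages: a 4 byte
 * access at GCPtrFirst=0x10ffe gives cbFirstPage = 0x1000 - 0xffe = 2 and
 * cbSecondPage = 4 - 2 = 2, so bytes 0..1 are backed by GCPhysFirst and
 * bytes 2..3 by GCPhysSecond, while the caller sees a single contiguous
 * bounce buffer of cbMem bytes.
 */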
6162
6163
6164/**
6165 * iemMemMap worker that deals with iemMemPageMap failures.
6166 */
6167static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6168 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6169{
6170 /*
6171 * Filter out conditions we can handle and the ones which shouldn't happen.
6172 */
6173 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6174 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6175 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6176 {
6177 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6178 return rcMap;
6179 }
6180 pVCpu->iem.s.cPotentialExits++;
6181
6182 /*
6183 * Read in the current memory content if it's a read, execute or partial
6184 * write access.
6185 */
6186 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6187 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6188 {
6189 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6190 memset(pbBuf, 0xff, cbMem);
6191 else
6192 {
6193 int rc;
6194 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6195 {
6196 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6197 if (rcStrict == VINF_SUCCESS)
6198 { /* nothing */ }
6199 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6200 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6201 else
6202 {
6203 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6204 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6205 return rcStrict;
6206 }
6207 }
6208 else
6209 {
6210 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6211 if (RT_SUCCESS(rc))
6212 { /* likely */ }
6213 else
6214 {
6215                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6216 GCPhysFirst, rc));
6217 return rc;
6218 }
6219 }
6220 }
6221 }
6222#ifdef VBOX_STRICT
6223    else
6224        memset(pbBuf, 0xcc, cbMem);
6227    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6228        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6229#endif
6230
6231 /*
6232 * Commit the bounce buffer entry.
6233 */
6234 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6235 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6236 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6237 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6238 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6239 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6240 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6241 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6242 pVCpu->iem.s.cActiveMappings++;
6243
6244 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6245 *ppvMem = pbBuf;
6246 return VINF_SUCCESS;
6247}
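/*
 * Note on the above: pages that cannot be mapped directly (MMIO, pages with
 * access handlers, or unassigned memory) are serviced through the bounce
 * buffer instead; reads of unassigned memory return all 0xff bytes here, and
 * any buffered write data is dealt with by iemMemBounceBufferCommitAndUnmap
 * when the mapping is committed.
 */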
6248
6249
6250
6251/**
6252 * Maps the specified guest memory for the given kind of access.
6253 *
6254 * This may be using bounce buffering of the memory if it's crossing a page
6255 * boundary or if there is an access handler installed for any of it. Because
6256 * of lock prefix guarantees, we're in for some extra clutter when this
6257 * happens.
6258 *
6259 * This may raise a \#GP, \#SS, \#PF or \#AC.
6260 *
6261 * @returns VBox strict status code.
6262 *
6263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6264 * @param ppvMem Where to return the pointer to the mapped memory.
6265 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6266 * 8, 12, 16, 32 or 512. When used by string operations
6267 * it can be up to a page.
6268 * @param iSegReg The index of the segment register to use for this
6269 * access. The base and limits are checked. Use UINT8_MAX
6270 * to indicate that no segmentation is required (for IDT,
6271 * GDT and LDT accesses).
6272 * @param GCPtrMem The address of the guest memory.
6273 * @param fAccess How the memory is being accessed. The
6274 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6275 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6276 * when raising exceptions.
6277 * @param uAlignCtl Alignment control:
6278 * - Bits 15:0 is the alignment mask.
6279 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6280 * IEM_MEMMAP_F_ALIGN_SSE, and
6281 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6282 * Pass zero to skip alignment.
6283 */
6284VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6285 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6286{
6287 /*
6288 * Check the input and figure out which mapping entry to use.
6289 */
6290 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6291 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6292 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6293 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6294 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6295
6296 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6297 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6298 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6299 {
6300 iMemMap = iemMemMapFindFree(pVCpu);
6301 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6302 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6303 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6304 pVCpu->iem.s.aMemMappings[2].fAccess),
6305 VERR_IEM_IPE_9);
6306 }
6307
6308 /*
6309 * Map the memory, checking that we can actually access it. If something
6310 * slightly complicated happens, fall back on bounce buffering.
6311 */
6312 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6313 if (rcStrict == VINF_SUCCESS)
6314 { /* likely */ }
6315 else
6316 return rcStrict;
6317
6318 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6319 { /* likely */ }
6320 else
6321 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6322
6323 /*
6324 * Alignment check.
6325 */
6326 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6327 { /* likelyish */ }
6328 else
6329 {
6330 /* Misaligned access. */
6331 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6332 {
6333 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6334 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6335 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6336 {
6337 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6338
6339 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6340 return iemRaiseAlignmentCheckException(pVCpu);
6341 }
6342 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6343 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6344 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6345 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6346 * that's what FXSAVE does on a 10980xe. */
6347 && iemMemAreAlignmentChecksEnabled(pVCpu))
6348 return iemRaiseAlignmentCheckException(pVCpu);
6349 else
6350 return iemRaiseGeneralProtectionFault0(pVCpu);
6351 }
6352 }
6353
6354#ifdef IEM_WITH_DATA_TLB
6355 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6356
6357 /*
6358 * Get the TLB entry for this page.
6359 */
6360 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6361 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6362 if (pTlbe->uTag == uTag)
6363 {
6364# ifdef VBOX_WITH_STATISTICS
6365 pVCpu->iem.s.DataTlb.cTlbHits++;
6366# endif
6367 }
6368 else
6369 {
6370 pVCpu->iem.s.DataTlb.cTlbMisses++;
6371 PGMPTWALK Walk;
6372 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6373 if (RT_FAILURE(rc))
6374 {
6375 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6376# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6377 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6378 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6379# endif
6380 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6381 }
6382
6383 Assert(Walk.fSucceeded);
6384 pTlbe->uTag = uTag;
6385 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6386 pTlbe->GCPhys = Walk.GCPhys;
6387 pTlbe->pbMappingR3 = NULL;
6388 }
6389
6390 /*
6391 * Check TLB page table level access flags.
6392 */
6393 /* If the page is either supervisor only or non-writable, we need to do
6394 more careful access checks. */
6395 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6396 {
6397 /* Write to read only memory? */
6398 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6399 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6400 && ( ( IEM_GET_CPL(pVCpu) == 3
6401 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6402 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6403 {
6404 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6405# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6406 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6407 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6408# endif
6409 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6410 }
6411
6412 /* Kernel memory accessed by userland? */
6413 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6414 && IEM_GET_CPL(pVCpu) == 3
6415 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6416 {
6417 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6418# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6419 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6420 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6421# endif
6422 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6423 }
6424 }
6425
6426 /*
6427 * Set the dirty / access flags.
6428 * ASSUMES this is set when the address is translated rather than on commit...
6429 */
6430 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6431 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6432 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6433 {
6434 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6435 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6436 AssertRC(rc2);
6437 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6438 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6439 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6440 }
6441
6442 /*
6443 * Look up the physical page info if necessary.
6444 */
6445 uint8_t *pbMem = NULL;
6446 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6447# ifdef IN_RING3
6448 pbMem = pTlbe->pbMappingR3;
6449# else
6450 pbMem = NULL;
6451# endif
6452 else
6453 {
6454 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6455 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6456 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6457 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6458 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6459 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6460 { /* likely */ }
6461 else
6462 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6463 pTlbe->pbMappingR3 = NULL;
6464 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6465 | IEMTLBE_F_NO_MAPPINGR3
6466 | IEMTLBE_F_PG_NO_READ
6467 | IEMTLBE_F_PG_NO_WRITE
6468 | IEMTLBE_F_PG_UNASSIGNED
6469 | IEMTLBE_F_PG_CODE_PAGE);
6470 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6471 &pbMem, &pTlbe->fFlagsAndPhysRev);
6472 AssertRCReturn(rc, rc);
6473# ifdef IN_RING3
6474 pTlbe->pbMappingR3 = pbMem;
6475# endif
6476 }
6477
6478 /*
6479 * Check the physical page level access and mapping.
6480 */
6481 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6482 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6483 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6484 { /* probably likely */ }
6485 else
6486 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6487 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6488 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6489 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6490 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6491 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6492
6493 if (pbMem)
6494 {
6495 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6496 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6497 fAccess |= IEM_ACCESS_NOT_LOCKED;
6498 }
6499 else
6500 {
6501 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6502 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6503 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6504 if (rcStrict != VINF_SUCCESS)
6505 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6506 }
6507
6508 void * const pvMem = pbMem;
6509
6510 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6511 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6512 if (fAccess & IEM_ACCESS_TYPE_READ)
6513 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6514
6515#else /* !IEM_WITH_DATA_TLB */
6516
6517 RTGCPHYS GCPhysFirst;
6518 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6519 if (rcStrict != VINF_SUCCESS)
6520 return rcStrict;
6521
6522 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6523 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6524 if (fAccess & IEM_ACCESS_TYPE_READ)
6525 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6526
6527 void *pvMem;
6528 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6529 if (rcStrict != VINF_SUCCESS)
6530 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6531
6532#endif /* !IEM_WITH_DATA_TLB */
6533
6534 /*
6535 * Fill in the mapping table entry.
6536 */
6537 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6538 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6539 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6540 pVCpu->iem.s.cActiveMappings += 1;
6541
6542 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6543 *ppvMem = pvMem;
6544
6545 return VINF_SUCCESS;
6546}
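/*
 * Illustrative usage sketch (this mirrors iemMemFetchDataU32_ZX_U64 and the
 * other fetch helpers further down; pu32Src, iSegReg and GCPtrMem are the
 * caller's own variables, and the last argument is just the alignment mask
 * with no IEM_MEMMAP_F_ALIGN_XXX flags):
 *
 *     uint32_t const *pu32Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
 *                                       IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const uValue = *pu32Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *     }
 */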
6547
6548
6549/**
6550 * Commits the guest memory if bounce buffered and unmaps it.
6551 *
6552 * @returns Strict VBox status code.
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 * @param pvMem The mapping.
6555 * @param fAccess The kind of access.
6556 */
6557VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6558{
6559 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6560 AssertReturn(iMemMap >= 0, iMemMap);
6561
6562 /* If it's bounce buffered, we may need to write back the buffer. */
6563 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6564 {
6565 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6566 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6567 }
6568 /* Otherwise unlock it. */
6569 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6570 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6571
6572 /* Free the entry. */
6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6574 Assert(pVCpu->iem.s.cActiveMappings != 0);
6575 pVCpu->iem.s.cActiveMappings--;
6576 return VINF_SUCCESS;
6577}
6578
6579
6580/**
6581 * Rolls back the guest memory (conceptually only) and unmaps it.
6582 *
6583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6584 * @param pvMem The mapping.
6585 * @param fAccess The kind of access.
6586 */
6587void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6588{
6589 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6590 AssertReturnVoid(iMemMap >= 0);
6591
6592 /* Unlock it if necessary. */
6593 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6594 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6595
6596 /* Free the entry. */
6597 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6598 Assert(pVCpu->iem.s.cActiveMappings != 0);
6599 pVCpu->iem.s.cActiveMappings--;
6600}
6601
6602#ifdef IEM_WITH_SETJMP
6603
6604/**
6605 * Maps the specified guest memory for the given kind of access, longjmp on
6606 * error.
6607 *
6608 * This may be using bounce buffering of the memory if it's crossing a page
6609 * boundary or if there is an access handler installed for any of it. Because
6610 * of lock prefix guarantees, we're in for some extra clutter when this
6611 * happens.
6612 *
6613 * This may raise a \#GP, \#SS, \#PF or \#AC.
6614 *
6615 * @returns Pointer to the mapped memory.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param cbMem The number of bytes to map. This is usually 1,
6619 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6620 * string operations it can be up to a page.
6621 * @param iSegReg The index of the segment register to use for
6622 * this access. The base and limits are checked.
6623 * Use UINT8_MAX to indicate that no segmentation
6624 * is required (for IDT, GDT and LDT accesses).
6625 * @param GCPtrMem The address of the guest memory.
6626 * @param fAccess How the memory is being accessed. The
6627 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6628 * how to map the memory, while the
6629 * IEM_ACCESS_WHAT_XXX bit is used when raising
6630 * exceptions.
6631 * @param uAlignCtl Alignment control:
6632 * - Bits 15:0 is the alignment mask.
6633 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6634 * IEM_MEMMAP_F_ALIGN_SSE, and
6635 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6636 * Pass zero to skip alignment.
6637 */
6638void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6639 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6640{
6641 /*
6642 * Check the input, check segment access and adjust address
6643 * with segment base.
6644 */
6645 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6646 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6647 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6648
6649 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6650 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6651 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6652
6653 /*
6654 * Alignment check.
6655 */
6656 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6657 { /* likelyish */ }
6658 else
6659 {
6660 /* Misaligned access. */
6661 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6662 {
6663 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6664 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6665 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6666 {
6667 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6668
6669 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6670 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6671 }
6672 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6673 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6674 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6675 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6676 * that's what FXSAVE does on a 10980xe. */
6677 && iemMemAreAlignmentChecksEnabled(pVCpu))
6678 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6679 else
6680 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6681 }
6682 }
6683
6684 /*
6685 * Figure out which mapping entry to use.
6686 */
6687 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6688 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6689 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6690 {
6691 iMemMap = iemMemMapFindFree(pVCpu);
6692 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6693 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6694 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6695 pVCpu->iem.s.aMemMappings[2].fAccess),
6696 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6697 }
6698
6699 /*
6700 * Crossing a page boundary?
6701 */
6702 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6703 { /* No (likely). */ }
6704 else
6705 {
6706 void *pvMem;
6707 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6708 if (rcStrict == VINF_SUCCESS)
6709 return pvMem;
6710 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6711 }
6712
6713#ifdef IEM_WITH_DATA_TLB
6714 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6715
6716 /*
6717 * Get the TLB entry for this page.
6718 */
6719 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6720 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6721 if (pTlbe->uTag == uTag)
6722 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6723 else
6724 {
6725 pVCpu->iem.s.DataTlb.cTlbMisses++;
6726 PGMPTWALK Walk;
6727 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6728 if (RT_FAILURE(rc))
6729 {
6730 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6731# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6732 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6733 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6734# endif
6735 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6736 }
6737
6738 Assert(Walk.fSucceeded);
6739 pTlbe->uTag = uTag;
6740 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6741 pTlbe->GCPhys = Walk.GCPhys;
6742 pTlbe->pbMappingR3 = NULL;
6743 }
6744
6745 /*
6746 * Check the flags and physical revision.
6747 */
6748 /** @todo make the caller pass these in with fAccess. */
6749 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6750 ? IEMTLBE_F_PT_NO_USER : 0;
6751 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6752 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6753 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6754 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6755 ? IEMTLBE_F_PT_NO_WRITE : 0)
6756 : 0;
6757 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6758 uint8_t *pbMem = NULL;
6759 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6760 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6761# ifdef IN_RING3
6762 pbMem = pTlbe->pbMappingR3;
6763# else
6764 pbMem = NULL;
6765# endif
6766 else
6767 {
6768 /*
6769 * Okay, something isn't quite right or needs refreshing.
6770 */
6771 /* Write to read only memory? */
6772 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6773 {
6774 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6775# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6776 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6777 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6778# endif
6779 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6780 }
6781
6782 /* Kernel memory accessed by userland? */
6783 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6784 {
6785 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6786# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6787 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6788 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6789# endif
6790 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6791 }
6792
6793 /* Set the dirty / access flags.
6794 ASSUMES this is set when the address is translated rather than on commit... */
6795 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6796 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6797 {
6798 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6799 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6800 AssertRC(rc2);
6801 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6802 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6803 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6804 }
6805
6806 /*
6807 * Check if the physical page info needs updating.
6808 */
6809 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6810# ifdef IN_RING3
6811 pbMem = pTlbe->pbMappingR3;
6812# else
6813 pbMem = NULL;
6814# endif
6815 else
6816 {
6817 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6818 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6820 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6821 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6822 pTlbe->pbMappingR3 = NULL;
6823 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6824 | IEMTLBE_F_NO_MAPPINGR3
6825 | IEMTLBE_F_PG_NO_READ
6826 | IEMTLBE_F_PG_NO_WRITE
6827 | IEMTLBE_F_PG_UNASSIGNED
6828 | IEMTLBE_F_PG_CODE_PAGE);
6829 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6830 &pbMem, &pTlbe->fFlagsAndPhysRev);
6831 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6832# ifdef IN_RING3
6833 pTlbe->pbMappingR3 = pbMem;
6834# endif
6835 }
6836
6837 /*
6838 * Check the physical page level access and mapping.
6839 */
6840 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6841 { /* probably likely */ }
6842 else
6843 {
6844 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6845 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6846 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6847 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6848 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6849 if (rcStrict == VINF_SUCCESS)
6850 return pbMem;
6851 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6852 }
6853 }
6854 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6855
6856 if (pbMem)
6857 {
6858 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6859 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6860 fAccess |= IEM_ACCESS_NOT_LOCKED;
6861 }
6862 else
6863 {
6864 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6865 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6866 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6867 if (rcStrict == VINF_SUCCESS)
6868 return pbMem;
6869 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6870 }
6871
6872 void * const pvMem = pbMem;
6873
6874 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6875 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6876 if (fAccess & IEM_ACCESS_TYPE_READ)
6877 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6878
6879#else /* !IEM_WITH_DATA_TLB */
6880
6881
6882 RTGCPHYS GCPhysFirst;
6883 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6884 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6885 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6886
6887 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6888 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6889 if (fAccess & IEM_ACCESS_TYPE_READ)
6890 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6891
6892 void *pvMem;
6893 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6894 if (rcStrict == VINF_SUCCESS)
6895 { /* likely */ }
6896 else
6897 {
6898 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6899 if (rcStrict == VINF_SUCCESS)
6900 return pvMem;
6901 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6902 }
6903
6904#endif /* !IEM_WITH_DATA_TLB */
6905
6906 /*
6907 * Fill in the mapping table entry.
6908 */
6909 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6910 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6911 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6912 pVCpu->iem.s.cActiveMappings++;
6913
6914 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6915 return pvMem;
6916}
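/*
 * Illustrative usage sketch for the longjmp variant (mirrors
 * iemMemFetchDataD80Jmp and friends below); there is no status code to
 * propagate since failures unwind via longjmp:
 *
 *     uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
 *                                                              IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *     uint32_t const  uValue  = *pu32Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 */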
6917
6918
6919/**
6920 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 * @param pvMem The mapping.
6924 * @param fAccess The kind of access.
6925 */
6926void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6927{
6928 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6929 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6930
6931 /* If it's bounce buffered, we may need to write back the buffer. */
6932 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6933 {
6934 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6935 {
6936 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6937 if (rcStrict == VINF_SUCCESS)
6938 return;
6939 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6940 }
6941 }
6942 /* Otherwise unlock it. */
6943 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6944 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6945
6946 /* Free the entry. */
6947 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6948 Assert(pVCpu->iem.s.cActiveMappings != 0);
6949 pVCpu->iem.s.cActiveMappings--;
6950}
6951
6952
6953/** Fallback for iemMemCommitAndUnmapRwJmp. */
6954void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6955{
6956 Assert(bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); RT_NOREF_PV(bMapInfo);
6957 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_RW);
6958}
6959
6960
6961/** Fallback for iemMemCommitAndUnmapWoJmp. */
6962void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6963{
6964 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);
6965 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_W);
6966}
6967
6968
6969/** Fallback for iemMemCommitAndUnmapRoJmp. */
6970void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6971{
6972 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
6973 iemMemCommitAndUnmapJmp(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
6974}
6975
6976
6977/** Fallback for iemMemRollbackAndUnmapWo. */
6978void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT
6979{
6980    Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);
6981    iemMemRollbackAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_W);
6982}
6983
6984#endif /* IEM_WITH_SETJMP */
6985
6986#ifndef IN_RING3
6987/**
6988 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6989 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6990 *
6991 * Allows the instruction to be completed and retired, while the IEM user will
6992 * return to ring-3 immediately afterwards and do the postponed writes there.
6993 *
6994 * @returns VBox status code (no strict statuses). Caller must check
6995 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6997 * @param pvMem The mapping.
6998 * @param fAccess The kind of access.
6999 */
7000VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
7001{
7002 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
7003 AssertReturn(iMemMap >= 0, iMemMap);
7004
7005 /* If it's bounce buffered, we may need to write back the buffer. */
7006 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7007 {
7008 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7009 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7010 }
7011 /* Otherwise unlock it. */
7012 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7013 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7014
7015 /* Free the entry. */
7016 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7017 Assert(pVCpu->iem.s.cActiveMappings != 0);
7018 pVCpu->iem.s.cActiveMappings--;
7019 return VINF_SUCCESS;
7020}
7021#endif
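/*
 * Hypothetical caller fragment for the postponed commit above (the actual
 * call sites vary; the point is that VMCPU_FF_IEM must be checked before
 * repeating a string instruction):
 *
 *     rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         break;  <- stop repeating; ring-3 will perform the postponed write(s)
 */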
7022
7023
7024/**
7025 * Rolls back mappings, releasing page locks and such.
7026 *
7027 * The caller shall only call this after checking cActiveMappings.
7028 *
7029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7030 */
7031void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7032{
7033 Assert(pVCpu->iem.s.cActiveMappings > 0);
7034
7035 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7036 while (iMemMap-- > 0)
7037 {
7038 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7039 if (fAccess != IEM_ACCESS_INVALID)
7040 {
7041 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7042 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7043 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7044 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7045 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7046 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7047 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7049 pVCpu->iem.s.cActiveMappings--;
7050 }
7051 }
7052}
7053
7054
7055/*
7056 * Instantiate R/W templates.
7057 */
7058#define TMPL_MEM_WITH_STACK
7059
7060#define TMPL_MEM_TYPE uint8_t
7061#define TMPL_MEM_FN_SUFF U8
7062#define TMPL_MEM_FMT_TYPE "%#04x"
7063#define TMPL_MEM_FMT_DESC "byte"
7064#include "IEMAllMemRWTmpl.cpp.h"
7065
7066#define TMPL_MEM_TYPE uint16_t
7067#define TMPL_MEM_FN_SUFF U16
7068#define TMPL_MEM_FMT_TYPE "%#06x"
7069#define TMPL_MEM_FMT_DESC "word"
7070#include "IEMAllMemRWTmpl.cpp.h"
7071
7072#define TMPL_WITH_PUSH_SREG
7073#define TMPL_MEM_TYPE uint32_t
7074#define TMPL_MEM_FN_SUFF U32
7075#define TMPL_MEM_FMT_TYPE "%#010x"
7076#define TMPL_MEM_FMT_DESC "dword"
7077#include "IEMAllMemRWTmpl.cpp.h"
7078#undef TMPL_WITH_PUSH_SREG
7079
7080#define TMPL_MEM_TYPE uint64_t
7081#define TMPL_MEM_FN_SUFF U64
7082#define TMPL_MEM_FMT_TYPE "%#018RX64"
7083#define TMPL_MEM_FMT_DESC "qword"
7084#include "IEMAllMemRWTmpl.cpp.h"
7085
7086#undef TMPL_MEM_WITH_STACK
7087
7088#define TMPL_MEM_TYPE uint64_t
7089#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7090#define TMPL_MEM_FN_SUFF U64AlignedU128
7091#define TMPL_MEM_FMT_TYPE "%#018RX64"
7092#define TMPL_MEM_FMT_DESC "qword"
7093#include "IEMAllMemRWTmpl.cpp.h"
7094
7095/* See IEMAllMemRWTmplInline.cpp.h */
7096#define TMPL_MEM_BY_REF
7097
7098#define TMPL_MEM_TYPE RTFLOAT80U
7099#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7100#define TMPL_MEM_FN_SUFF R80
7101#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7102#define TMPL_MEM_FMT_DESC "tword"
7103#include "IEMAllMemRWTmpl.cpp.h"
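/*
 * Each inclusion above stamps out the fetch/store helpers for one memory type;
 * the uint16_t block, for instance, is expected to yield iemMemFetchDataU16 and
 * iemMemStoreDataU16 (plus stack push/pop helpers while TMPL_MEM_WITH_STACK is
 * defined).  The exact set of functions generated is determined by
 * IEMAllMemRWTmpl.cpp.h.
 */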
7104
7105
7106/**
7107 * Fetches a data dword and zero extends it to a qword.
7108 *
7109 * @returns Strict VBox status code.
7110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7111 * @param pu64Dst Where to return the qword.
7112 * @param iSegReg The index of the segment register to use for
7113 * this access. The base and limits are checked.
7114 * @param GCPtrMem The address of the guest memory.
7115 */
7116VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7117{
7118 /* The lazy approach for now... */
7119 uint32_t const *pu32Src;
7120 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7121 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7122 if (rc == VINF_SUCCESS)
7123 {
7124 *pu64Dst = *pu32Src;
7125 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7126 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7127 }
7128 return rc;
7129}
7130
7131
7132#ifdef SOME_UNUSED_FUNCTION
7133/**
7134 * Fetches a data dword and sign extends it to a qword.
7135 *
7136 * @returns Strict VBox status code.
7137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7138 * @param pu64Dst Where to return the sign extended value.
7139 * @param iSegReg The index of the segment register to use for
7140 * this access. The base and limits are checked.
7141 * @param GCPtrMem The address of the guest memory.
7142 */
7143VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7144{
7145 /* The lazy approach for now... */
7146 int32_t const *pi32Src;
7147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7148 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7149 if (rc == VINF_SUCCESS)
7150 {
7151 *pu64Dst = *pi32Src;
7152 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7153 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7154 }
7155#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7156 else
7157 *pu64Dst = 0;
7158#endif
7159 return rc;
7160}
7161#endif
7162
7163
7164/**
7165 * Fetches a data decimal tword.
7166 *
7167 * @returns Strict VBox status code.
7168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7169 * @param pd80Dst Where to return the tword.
7170 * @param iSegReg The index of the segment register to use for
7171 * this access. The base and limits are checked.
7172 * @param GCPtrMem The address of the guest memory.
7173 */
7174VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7175{
7176 /* The lazy approach for now... */
7177 PCRTPBCD80U pd80Src;
7178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7179 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7180 if (rc == VINF_SUCCESS)
7181 {
7182 *pd80Dst = *pd80Src;
7183 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7184 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7185 }
7186 return rc;
7187}
7188
7189
7190#ifdef IEM_WITH_SETJMP
7191/**
7192 * Fetches a data decimal tword, longjmp on error.
7193 *
7194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7195 * @param pd80Dst Where to return the tword.
7196 * @param iSegReg The index of the segment register to use for
7197 * this access. The base and limits are checked.
7198 * @param GCPtrMem The address of the guest memory.
7199 */
7200void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7201{
7202 /* The lazy approach for now... */
7203 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7204                                                   IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7205 *pd80Dst = *pd80Src;
7206 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7207 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7208}
7209#endif
7210
7211
7212/**
7213 * Fetches a data dqword (double qword), generally SSE related.
7214 *
7215 * @returns Strict VBox status code.
7216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7217 * @param pu128Dst Where to return the dqword.
7218 * @param iSegReg The index of the segment register to use for
7219 * this access. The base and limits are checked.
7220 * @param GCPtrMem The address of the guest memory.
7221 */
7222VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7223{
7224 /* The lazy approach for now... */
7225 PCRTUINT128U pu128Src;
7226 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7227 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7228 if (rc == VINF_SUCCESS)
7229 {
7230 pu128Dst->au64[0] = pu128Src->au64[0];
7231 pu128Dst->au64[1] = pu128Src->au64[1];
7232 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7233 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7234 }
7235 return rc;
7236}
7237
7238
7239#ifdef IEM_WITH_SETJMP
7240/**
7241 * Fetches a data dqword (double qword), generally SSE related.
7242 *
7243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7244 * @param pu128Dst Where to return the dqword.
7245 * @param iSegReg The index of the segment register to use for
7246 * this access. The base and limits are checked.
7247 * @param GCPtrMem The address of the guest memory.
7248 */
7249void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7250{
7251 /* The lazy approach for now... */
7252 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7253 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7254 pu128Dst->au64[0] = pu128Src->au64[0];
7255 pu128Dst->au64[1] = pu128Src->au64[1];
7256 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7257 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7258}
7259#endif
7260
7261
7262/**
7263 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7264 * related.
7265 *
7266 * Raises \#GP(0) if not aligned.
7267 *
7268 * @returns Strict VBox status code.
7269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7270 * @param pu128Dst Where to return the dqword.
7271 * @param iSegReg The index of the segment register to use for
7272 * this access. The base and limits are checked.
7273 * @param GCPtrMem The address of the guest memory.
7274 */
7275VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7276{
7277 /* The lazy approach for now... */
7278 PCRTUINT128U pu128Src;
7279 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7280 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7281 if (rc == VINF_SUCCESS)
7282 {
7283 pu128Dst->au64[0] = pu128Src->au64[0];
7284 pu128Dst->au64[1] = pu128Src->au64[1];
7285 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7286 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7287 }
7288 return rc;
7289}
7290
7291
7292#ifdef IEM_WITH_SETJMP
7293/**
7294 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7295 * related, longjmp on error.
7296 *
7297 * Raises \#GP(0) if not aligned.
7298 *
7299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7300 * @param pu128Dst Where to return the dqword.
7301 * @param iSegReg The index of the segment register to use for
7302 * this access. The base and limits are checked.
7303 * @param GCPtrMem The address of the guest memory.
7304 */
7305void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7306 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7307{
7308 /* The lazy approach for now... */
7309 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7310 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7311 pu128Dst->au64[0] = pu128Src->au64[0];
7312 pu128Dst->au64[1] = pu128Src->au64[1];
7313 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7314 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7315}
7316#endif
7317
7318
7319/**
7320 * Fetches a data oword (octo word), generally AVX related.
7321 *
7322 * @returns Strict VBox status code.
7323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7324 * @param pu256Dst Where to return the oword.
7325 * @param iSegReg The index of the segment register to use for
7326 * this access. The base and limits are checked.
7327 * @param GCPtrMem The address of the guest memory.
7328 */
7329VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7330{
7331 /* The lazy approach for now... */
7332 PCRTUINT256U pu256Src;
7333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7334 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7335 if (rc == VINF_SUCCESS)
7336 {
7337 pu256Dst->au64[0] = pu256Src->au64[0];
7338 pu256Dst->au64[1] = pu256Src->au64[1];
7339 pu256Dst->au64[2] = pu256Src->au64[2];
7340 pu256Dst->au64[3] = pu256Src->au64[3];
7341 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7342 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7343 }
7344 return rc;
7345}
7346
7347
7348#ifdef IEM_WITH_SETJMP
7349/**
7350 * Fetches a data oword (octo word), generally AVX related.
7351 *
7352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7353 * @param pu256Dst Where to return the oword.
7354 * @param iSegReg The index of the segment register to use for
7355 * this access. The base and limits are checked.
7356 * @param GCPtrMem The address of the guest memory.
7357 */
7358void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7359{
7360 /* The lazy approach for now... */
7361 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7362 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7363 pu256Dst->au64[0] = pu256Src->au64[0];
7364 pu256Dst->au64[1] = pu256Src->au64[1];
7365 pu256Dst->au64[2] = pu256Src->au64[2];
7366 pu256Dst->au64[3] = pu256Src->au64[3];
7367 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7368 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7369}
7370#endif
7371
7372
7373/**
7374 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7375 * related.
7376 *
7377 * Raises \#GP(0) if not aligned.
7378 *
7379 * @returns Strict VBox status code.
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param pu256Dst Where to return the qqword.
7382 * @param iSegReg The index of the segment register to use for
7383 * this access. The base and limits are checked.
7384 * @param GCPtrMem The address of the guest memory.
7385 */
7386VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7387{
7388 /* The lazy approach for now... */
7389 PCRTUINT256U pu256Src;
7390 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7391 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7392 if (rc == VINF_SUCCESS)
7393 {
7394 pu256Dst->au64[0] = pu256Src->au64[0];
7395 pu256Dst->au64[1] = pu256Src->au64[1];
7396 pu256Dst->au64[2] = pu256Src->au64[2];
7397 pu256Dst->au64[3] = pu256Src->au64[3];
7398 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7399 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7400 }
7401 return rc;
7402}
7403
7404
7405#ifdef IEM_WITH_SETJMP
7406/**
7407 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7408 * related, longjmp on error.
7409 *
7410 * Raises \#GP(0) if not aligned.
7411 *
7412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7413 * @param pu256Dst Where to return the qqword.
7414 * @param iSegReg The index of the segment register to use for
7415 * this access. The base and limits are checked.
7416 * @param GCPtrMem The address of the guest memory.
7417 */
7418void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7419 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7420{
7421 /* The lazy approach for now... */
7422 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7423 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7424 pu256Dst->au64[0] = pu256Src->au64[0];
7425 pu256Dst->au64[1] = pu256Src->au64[1];
7426 pu256Dst->au64[2] = pu256Src->au64[2];
7427 pu256Dst->au64[3] = pu256Src->au64[3];
7428 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7429 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7430}
7431#endif
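
/* A minimal sketch (not built) of what the alignment control value passed to
 * iemMemMap by the aligned fetch/store helpers above encodes: the low bits are
 * simply (access size - 1), i.e. the address bits that must be zero, while the
 * IEM_MEMMAP_F_ALIGN_* flags select what to raise on a violation.  The helper
 * name below is made up and the check is a simplification; iemMemMap remains
 * the authoritative implementation. */
#if 0
static bool iemExampleIsAccessAligned(RTGCPTR GCPtrMem, size_t cbAccess)
{
    RTGCPTR const fAlignMask = (RTGCPTR)cbAccess - 1;   /* 15 for a 16-byte SSE access, 31 for a 32-byte AVX one */
    return (GCPtrMem & fAlignMask) == 0;                /* misaligned accesses raise #GP(0) per the docs above */
}
#endif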
7432
7433
7434
7435/**
7436 * Fetches a descriptor register (lgdt, lidt).
7437 *
7438 * @returns Strict VBox status code.
7439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7440 * @param pcbLimit Where to return the limit.
7441 * @param pGCPtrBase Where to return the base.
7442 * @param iSegReg The index of the segment register to use for
7443 * this access. The base and limits are checked.
7444 * @param GCPtrMem The address of the guest memory.
7445 * @param enmOpSize The effective operand size.
7446 */
7447VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7448 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7449{
7450 /*
7451 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7452 * little special:
7453 * - The two reads are done separately.
7454 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7455 * - We suspect the 386 to actually commit the limit before the base in
7456 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7457 * don't try to emulate this eccentric behavior, because it's not well
7458 * enough understood and rather hard to trigger.
7459 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7460 */
7461 VBOXSTRICTRC rcStrict;
7462 if (IEM_IS_64BIT_CODE(pVCpu))
7463 {
7464 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7465 if (rcStrict == VINF_SUCCESS)
7466 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7467 }
7468 else
7469 {
7470 uint32_t uTmp = 0; /* (silence a Visual C++ 'maybe used uninitialized' warning) */
7471 if (enmOpSize == IEMMODE_32BIT)
7472 {
7473 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7474 {
7475 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7476 if (rcStrict == VINF_SUCCESS)
7477 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7478 }
7479 else
7480 {
7481 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7482 if (rcStrict == VINF_SUCCESS)
7483 {
7484 *pcbLimit = (uint16_t)uTmp;
7485 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7486 }
7487 }
7488 if (rcStrict == VINF_SUCCESS)
7489 *pGCPtrBase = uTmp;
7490 }
7491 else
7492 {
7493 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7494 if (rcStrict == VINF_SUCCESS)
7495 {
7496 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7497 if (rcStrict == VINF_SUCCESS)
7498 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7499 }
7500 }
7501 }
7502 return rcStrict;
7503}
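
/* A minimal sketch (not built) of the memory image iemMemFetchDataXdtr reads:
 * a 16-bit limit followed by the base (4 bytes outside 64-bit code, 8 bytes in
 * 64-bit code), with the base truncated to 24 bits for a 16-bit operand size.
 * The helper and its raw byte buffer are made up for illustration only. */
#if 0
static void iemExampleDecodeXdtrImage(uint8_t const *pbImage, bool f64Bit, bool f16BitOpSize,
                                      uint16_t *pcbLimit, uint64_t *puBase)
{
    /* Bytes 0..1: the limit (read first, just like the code above). */
    *pcbLimit = (uint16_t)(pbImage[0] | ((uint16_t)pbImage[1] << 8));

    /* Bytes 2..5 (or 2..9 in 64-bit code): the base address. */
    uint64_t       uBase  = 0;
    unsigned const cbBase = f64Bit ? 8 : 4;
    for (unsigned i = 0; i < cbBase; i++)
        uBase |= (uint64_t)pbImage[2 + i] << (i * 8);
    if (!f64Bit && f16BitOpSize)
        uBase &= UINT32_C(0x00ffffff);      /* 16-bit operand size: only 24 base bits are used */
    *puBase = uBase;
}
#endif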
7504
7505
7506/**
7507 * Stores a data dqword.
7508 *
7509 * @returns Strict VBox status code.
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param iSegReg The index of the segment register to use for
7512 * this access. The base and limits are checked.
7513 * @param GCPtrMem The address of the guest memory.
7514 * @param u128Value The value to store.
7515 */
7516VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7517{
7518 /* The lazy approach for now... */
7519 PRTUINT128U pu128Dst;
7520 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7521 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7522 if (rc == VINF_SUCCESS)
7523 {
7524 pu128Dst->au64[0] = u128Value.au64[0];
7525 pu128Dst->au64[1] = u128Value.au64[1];
7526 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7527 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7528 }
7529 return rc;
7530}
7531
7532
7533#ifdef IEM_WITH_SETJMP
7534/**
7535 * Stores a data dqword, longjmp on error.
7536 *
7537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7538 * @param iSegReg The index of the segment register to use for
7539 * this access. The base and limits are checked.
7540 * @param GCPtrMem The address of the guest memory.
7541 * @param u128Value The value to store.
7542 */
7543void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7544{
7545 /* The lazy approach for now... */
7546 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7547 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7548 pu128Dst->au64[0] = u128Value.au64[0];
7549 pu128Dst->au64[1] = u128Value.au64[1];
7550 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7551 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7552}
7553#endif
7554
7555
7556/**
7557 * Stores a data dqword, SSE aligned.
7558 *
7559 * @returns Strict VBox status code.
7560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7561 * @param iSegReg The index of the segment register to use for
7562 * this access. The base and limits are checked.
7563 * @param GCPtrMem The address of the guest memory.
7564 * @param u128Value The value to store.
7565 */
7566VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7567{
7568 /* The lazy approach for now... */
7569 PRTUINT128U pu128Dst;
7570 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7571 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7572 if (rc == VINF_SUCCESS)
7573 {
7574 pu128Dst->au64[0] = u128Value.au64[0];
7575 pu128Dst->au64[1] = u128Value.au64[1];
7576 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7577 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7578 }
7579 return rc;
7580}
7581
7582
7583#ifdef IEM_WITH_SETJMP
7584/**
7585 * Stores a data dqword, SSE aligned, longjmp on error.
7586 *
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param iSegReg The index of the segment register to use for
7590 * this access. The base and limits are checked.
7591 * @param GCPtrMem The address of the guest memory.
7592 * @param u128Value The value to store.
7593 */
7594void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7595 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7596{
7597 /* The lazy approach for now... */
7598 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7599 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7600 pu128Dst->au64[0] = u128Value.au64[0];
7601 pu128Dst->au64[1] = u128Value.au64[1];
7602 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7603 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7604}
7605#endif
7606
7607
7608/**
7609 * Stores a data qqword.
7610 *
7611 * @returns Strict VBox status code.
7612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7613 * @param iSegReg The index of the segment register to use for
7614 * this access. The base and limits are checked.
7615 * @param GCPtrMem The address of the guest memory.
7616 * @param pu256Value Pointer to the value to store.
7617 */
7618VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7619{
7620 /* The lazy approach for now... */
7621 PRTUINT256U pu256Dst;
7622 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7623 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7624 if (rc == VINF_SUCCESS)
7625 {
7626 pu256Dst->au64[0] = pu256Value->au64[0];
7627 pu256Dst->au64[1] = pu256Value->au64[1];
7628 pu256Dst->au64[2] = pu256Value->au64[2];
7629 pu256Dst->au64[3] = pu256Value->au64[3];
7630 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7631 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7632 }
7633 return rc;
7634}
7635
7636
7637#ifdef IEM_WITH_SETJMP
7638/**
7639 * Stores a data qqword, longjmp on error.
7640 *
7641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7642 * @param iSegReg The index of the segment register to use for
7643 * this access. The base and limits are checked.
7644 * @param GCPtrMem The address of the guest memory.
7645 * @param pu256Value Pointer to the value to store.
7646 */
7647void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7648{
7649 /* The lazy approach for now... */
7650 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7651 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7652 pu256Dst->au64[0] = pu256Value->au64[0];
7653 pu256Dst->au64[1] = pu256Value->au64[1];
7654 pu256Dst->au64[2] = pu256Value->au64[2];
7655 pu256Dst->au64[3] = pu256Value->au64[3];
7656 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7657 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7658}
7659#endif
7660
7661
7662/**
7663 * Stores a data qqword, AVX \#GP(0) aligned.
7664 *
7665 * @returns Strict VBox status code.
7666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7667 * @param iSegReg The index of the segment register to use for
7668 * this access. The base and limits are checked.
7669 * @param GCPtrMem The address of the guest memory.
7670 * @param pu256Value Pointer to the value to store.
7671 */
7672VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7673{
7674 /* The lazy approach for now... */
7675 PRTUINT256U pu256Dst;
7676 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7677 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7678 if (rc == VINF_SUCCESS)
7679 {
7680 pu256Dst->au64[0] = pu256Value->au64[0];
7681 pu256Dst->au64[1] = pu256Value->au64[1];
7682 pu256Dst->au64[2] = pu256Value->au64[2];
7683 pu256Dst->au64[3] = pu256Value->au64[3];
7684 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7685 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7686 }
7687 return rc;
7688}
7689
7690
7691#ifdef IEM_WITH_SETJMP
7692/**
7693 * Stores a data qqword, AVX aligned, longjmp on error.
7694 *
7696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7697 * @param iSegReg The index of the segment register to use for
7698 * this access. The base and limits are checked.
7699 * @param GCPtrMem The address of the guest memory.
7700 * @param pu256Value Pointer to the value to store.
7701 */
7702void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7703 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7704{
7705 /* The lazy approach for now... */
7706 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7707 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7708 pu256Dst->au64[0] = pu256Value->au64[0];
7709 pu256Dst->au64[1] = pu256Value->au64[1];
7710 pu256Dst->au64[2] = pu256Value->au64[2];
7711 pu256Dst->au64[3] = pu256Value->au64[3];
7712 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7713 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7714}
7715#endif
7716
7717
7718/**
7719 * Stores a descriptor register (sgdt, sidt).
7720 *
7721 * @returns Strict VBox status code.
7722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7723 * @param cbLimit The limit.
7724 * @param GCPtrBase The base address.
7725 * @param iSegReg The index of the segment register to use for
7726 * this access. The base and limits are checked.
7727 * @param GCPtrMem The address of the guest memory.
7728 */
7729VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7730{
7731 /*
7732 * The SIDT and SGDT instructions actually store the data using two
7733 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7734 * do not respond to opsize prefixes.
7735 */
7736 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7737 if (rcStrict == VINF_SUCCESS)
7738 {
7739 if (IEM_IS_16BIT_CODE(pVCpu))
7740 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7741 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7742 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7743 else if (IEM_IS_32BIT_CODE(pVCpu))
7744 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7745 else
7746 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7747 }
7748 return rcStrict;
7749}
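
/* A minimal sketch (not built) of the 6-byte image iemMemStoreDataXdtr
 * produces in 16-bit and 32-bit code: the limit in bytes 0..1 and a 32-bit
 * base in bytes 2..5, with 286-class CPUs in 16-bit code storing 0xff as the
 * top base byte.  The helper name is made up; the layout mirrors the stores
 * above. */
#if 0
static void iemExampleBuildXdtrImage32(uint8_t *pbDst, uint16_t cbLimit, uint32_t uBase, bool fFill286TopByte)
{
    pbDst[0] = (uint8_t)cbLimit;
    pbDst[1] = (uint8_t)(cbLimit >> 8);
    uint32_t const uStored = fFill286TopByte ? uBase | UINT32_C(0xff000000) : uBase;
    pbDst[2] = (uint8_t)uStored;
    pbDst[3] = (uint8_t)(uStored >>  8);
    pbDst[4] = (uint8_t)(uStored >> 16);
    pbDst[5] = (uint8_t)(uStored >> 24);
}
#endif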
7750
7751
7752/**
7753 * Begin a special stack push (used by interrupts, exceptions and such).
7754 *
7755 * This will raise \#SS or \#PF if appropriate.
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7759 * @param cbMem The number of bytes to push onto the stack.
7760 * @param cbAlign The alignment mask (7, 3, 1).
7761 * @param ppvMem Where to return the pointer to the stack memory.
7762 * As with the other memory functions this could be
7763 * direct access or bounce buffered access, so
7764 * don't commit registers until the commit call
7765 * succeeds.
7766 * @param puNewRsp Where to return the new RSP value. This must be
7767 * passed unchanged to
7768 * iemMemStackPushCommitSpecial().
7769 */
7770VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7771 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7772{
7773 Assert(cbMem < UINT8_MAX);
7774 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7775 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
7776 IEM_ACCESS_STACK_W, cbAlign);
7777}
7778
7779
7780/**
7781 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7782 *
7783 * This will update the rSP.
7784 *
7785 * @returns Strict VBox status code.
7786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7787 * @param pvMem The pointer returned by
7788 * iemMemStackPushBeginSpecial().
7789 * @param uNewRsp The new RSP value returned by
7790 * iemMemStackPushBeginSpecial().
7791 */
7792VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
7793{
7794 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
7795 if (rcStrict == VINF_SUCCESS)
7796 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7797 return rcStrict;
7798}
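
/* A minimal usage sketch (not built) of the special push pair above: map the
 * stack space, fill it in, and only then commit, which is also what updates
 * RSP.  The function name is made up; the real callers are the
 * interrupt/exception dispatch paths. */
#if 0
static VBOXSTRICTRC iemExamplePushU64Special(PVMCPUCC pVCpu, uint64_t uValue)
{
    uint64_t *pu64Dst = NULL;
    uint64_t  uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uValue), 7 /*cbAlign mask*/,
                                                        (void **)&pu64Dst, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                    /* #SS, #PF and such */

    *pu64Dst = uValue;                      /* write to the (possibly bounce buffered) mapping */
    return iemMemStackPushCommitSpecial(pVCpu, pu64Dst, uNewRsp); /* commits memory and RSP */
}
#endif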
7799
7800
7801/**
7802 * Begin a special stack pop (used by iret, retf and such).
7803 *
7804 * This will raise \#SS or \#PF if appropriate.
7805 *
7806 * @returns Strict VBox status code.
7807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7808 * @param cbMem The number of bytes to pop from the stack.
7809 * @param cbAlign The alignment mask (7, 3, 1).
7810 * @param ppvMem Where to return the pointer to the stack memory.
7811 * @param puNewRsp Where to return the new RSP value. This must be
7812 * assigned to CPUMCTX::rsp manually some time
7813 * after iemMemStackPopDoneSpecial() has been
7814 * called.
7815 */
7816VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7817 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7818{
7819 Assert(cbMem < UINT8_MAX);
7820 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7821 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7822}
7823
7824
7825/**
7826 * Continue a special stack pop (used by iret and retf), for the purpose of
7827 * retrieving a new stack pointer.
7828 *
7829 * This will raise \#SS or \#PF if appropriate.
7830 *
7831 * @returns Strict VBox status code.
7832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7833 * @param off Offset from the top of the stack. This is zero
7834 * except in the retf case.
7835 * @param cbMem The number of bytes to pop from the stack.
7836 * @param ppvMem Where to return the pointer to the stack memory.
7837 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7838 * return this because all use of this function is
7839 * to retrieve a new value and anything we return
7840 * here would be discarded.)
7841 */
7842VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7843 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
7844{
7845 Assert(cbMem < UINT8_MAX);
7846
7847 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7848 RTGCPTR GCPtrTop;
7849 if (IEM_IS_64BIT_CODE(pVCpu))
7850 GCPtrTop = uCurNewRsp;
7851 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7852 GCPtrTop = (uint32_t)uCurNewRsp;
7853 else
7854 GCPtrTop = (uint16_t)uCurNewRsp;
7855
7856 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7857 0 /* checked in iemMemStackPopBeginSpecial */);
7858}
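
/* A minimal sketch (not built) of the stack pointer narrowing done inline
 * above: a 64-bit stack uses RSP as is, a 32-bit stack (SS.D/B=1) uses ESP,
 * and a 16-bit stack uses SP.  The helper name is made up. */
#if 0
static RTGCPTR iemExampleStackPtrFromRsp(PVMCPUCC pVCpu, uint64_t uRsp)
{
    if (IEM_IS_64BIT_CODE(pVCpu))
        return uRsp;
    if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
        return (uint32_t)uRsp;
    return (uint16_t)uRsp;
}
#endif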
7859
7860
7861/**
7862 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7863 * iemMemStackPopContinueSpecial).
7864 *
7865 * The caller will manually commit the rSP.
7866 *
7867 * @returns Strict VBox status code.
7868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7869 * @param pvMem The pointer returned by
7870 * iemMemStackPopBeginSpecial() or
7871 * iemMemStackPopContinueSpecial().
7872 */
7873VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
7874{
7875 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7876}
7877
7878
7879/**
7880 * Fetches a system table byte.
7881 *
7882 * @returns Strict VBox status code.
7883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7884 * @param pbDst Where to return the byte.
7885 * @param iSegReg The index of the segment register to use for
7886 * this access. The base and limits are checked.
7887 * @param GCPtrMem The address of the guest memory.
7888 */
7889VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7890{
7891 /* The lazy approach for now... */
7892 uint8_t const *pbSrc;
7893 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7894 if (rc == VINF_SUCCESS)
7895 {
7896 *pbDst = *pbSrc;
7897 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7898 }
7899 return rc;
7900}
7901
7902
7903/**
7904 * Fetches a system table word.
7905 *
7906 * @returns Strict VBox status code.
7907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7908 * @param pu16Dst Where to return the word.
7909 * @param iSegReg The index of the segment register to use for
7910 * this access. The base and limits are checked.
7911 * @param GCPtrMem The address of the guest memory.
7912 */
7913VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7914{
7915 /* The lazy approach for now... */
7916 uint16_t const *pu16Src;
7917 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7918 if (rc == VINF_SUCCESS)
7919 {
7920 *pu16Dst = *pu16Src;
7921 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7922 }
7923 return rc;
7924}
7925
7926
7927/**
7928 * Fetches a system table dword.
7929 *
7930 * @returns Strict VBox status code.
7931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7932 * @param pu32Dst Where to return the dword.
7933 * @param iSegReg The index of the segment register to use for
7934 * this access. The base and limits are checked.
7935 * @param GCPtrMem The address of the guest memory.
7936 */
7937VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7938{
7939 /* The lazy approach for now... */
7940 uint32_t const *pu32Src;
7941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7942 if (rc == VINF_SUCCESS)
7943 {
7944 *pu32Dst = *pu32Src;
7945 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
7946 }
7947 return rc;
7948}
7949
7950
7951/**
7952 * Fetches a system table qword.
7953 *
7954 * @returns Strict VBox status code.
7955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7956 * @param pu64Dst Where to return the qword.
7957 * @param iSegReg The index of the segment register to use for
7958 * this access. The base and limits are checked.
7959 * @param GCPtrMem The address of the guest memory.
7960 */
7961VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7962{
7963 /* The lazy approach for now... */
7964 uint64_t const *pu64Src;
7965 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7966 if (rc == VINF_SUCCESS)
7967 {
7968 *pu64Dst = *pu64Src;
7969 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
7970 }
7971 return rc;
7972}
7973
7974
7975/**
7976 * Fetches a descriptor table entry with caller specified error code.
7977 *
7978 * @returns Strict VBox status code.
7979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7980 * @param pDesc Where to return the descriptor table entry.
7981 * @param uSel The selector which table entry to fetch.
7982 * @param uXcpt The exception to raise on table lookup error.
7983 * @param uErrorCode The error code associated with the exception.
7984 */
7985static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7986 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7987{
7988 AssertPtr(pDesc);
7989 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7990
7991 /** @todo did the 286 require all 8 bytes to be accessible? */
7992 /*
7993 * Get the selector table base and check bounds.
7994 */
7995 RTGCPTR GCPtrBase;
7996 if (uSel & X86_SEL_LDT)
7997 {
7998 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7999 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8000 {
8001 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8002 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8003 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8004 uErrorCode, 0);
8005 }
8006
8007 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8008 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8009 }
8010 else
8011 {
8012 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8013 {
8014 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8015 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8016 uErrorCode, 0);
8017 }
8018 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8019 }
8020
8021 /*
8022 * Read the legacy descriptor and maybe the long mode extensions if
8023 * required.
8024 */
8025 VBOXSTRICTRC rcStrict;
8026 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8027 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8028 else
8029 {
8030 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8031 if (rcStrict == VINF_SUCCESS)
8032 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8033 if (rcStrict == VINF_SUCCESS)
8034 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8035 if (rcStrict == VINF_SUCCESS)
8036 pDesc->Legacy.au16[3] = 0;
8037 else
8038 return rcStrict;
8039 }
8040
8041 if (rcStrict == VINF_SUCCESS)
8042 {
8043 if ( !IEM_IS_LONG_MODE(pVCpu)
8044 || pDesc->Legacy.Gen.u1DescType)
8045 pDesc->Long.au64[1] = 0;
8046 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8047 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8048 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8049 else
8050 {
8051 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8052 /** @todo is this the right exception? */
8053 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8054 }
8055 }
8056 return rcStrict;
8057}
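
/* A minimal sketch (not built) of how a selector value decomposes into the
 * pieces the fetch above checks: table offset, LDT/GDT choice and RPL.  The
 * struct and helper names are made up; the masks are the x86.h ones used
 * above. */
#if 0
typedef struct IEMEXAMPLESELPARTS
{
    uint16_t offTable;  /**< Byte offset of the 8-byte entry in the table (uSel & X86_SEL_MASK). */
    bool     fLdt;      /**< True if the LDT is indexed rather than the GDT (uSel & X86_SEL_LDT). */
    uint8_t  uRpl;      /**< Requested privilege level (uSel & X86_SEL_RPL). */
} IEMEXAMPLESELPARTS;

static IEMEXAMPLESELPARTS iemExampleSplitSel(uint16_t uSel)
{
    IEMEXAMPLESELPARTS Parts;
    Parts.offTable = uSel & X86_SEL_MASK;           /* bits 15:3 */
    Parts.fLdt     = RT_BOOL(uSel & X86_SEL_LDT);   /* bit 2 */
    Parts.uRpl     = (uint8_t)(uSel & X86_SEL_RPL); /* bits 1:0 */
    return Parts;
}
#endif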
8058
8059
8060/**
8061 * Fetches a descriptor table entry.
8062 *
8063 * @returns Strict VBox status code.
8064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8065 * @param pDesc Where to return the descriptor table entry.
8066 * @param uSel The selector which table entry to fetch.
8067 * @param uXcpt The exception to raise on table lookup error.
8068 */
8069VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8070{
8071 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8072}
8073
8074
8075/**
8076 * Marks the selector descriptor as accessed (only non-system descriptors).
8077 *
8078 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8079 * will therefore skip the limit checks.
8080 *
8081 * @returns Strict VBox status code.
8082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8083 * @param uSel The selector.
8084 */
8085VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8086{
8087 /*
8088 * Get the selector table base and calculate the entry address.
8089 */
8090 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8091 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8092 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8093 GCPtr += uSel & X86_SEL_MASK;
8094
8095 /*
8096 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8097 * ugly stuff to avoid this. This will make sure it's an atomic access
8098 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8099 */
8100 VBOXSTRICTRC rcStrict;
8101 uint32_t volatile *pu32;
8102 if ((GCPtr & 3) == 0)
8103 {
8104 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8105 GCPtr += 2 + 2;
8106 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8107 if (rcStrict != VINF_SUCCESS)
8108 return rcStrict;
8109 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8110 }
8111 else
8112 {
8113 /* The misaligned GDT/LDT case, map the whole thing. */
8114 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8115 if (rcStrict != VINF_SUCCESS)
8116 return rcStrict;
8117 switch ((uintptr_t)pu32 & 3)
8118 {
8119 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8120 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8121 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8122 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8123 }
8124 }
8125
8126 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8127}
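
/* A minimal sketch (not built) of the bit arithmetic behind the aligned path
 * above: the accessed bit is bit 0 of the type field in descriptor byte 5,
 * i.e. bit 40 of the 8-byte entry, so after mapping the dword at offset 4 it
 * becomes bit 40 - 32 = 8.  The helper name is made up. */
#if 0
static void iemExampleAccessedBitMath(void)
{
    unsigned const iBitInDesc  = 40;                    /* byte 5, bit 0 (X86_SEL_TYPE_ACCESSED) */
    unsigned const offDword    = 2 + 2;                 /* the dword mapped by the aligned path above */
    unsigned const iBitInDword = iBitInDesc - offDword * 8;
    Assert(iBitInDword == 8);                           /* matches ASMAtomicBitSet(pu32, 8) above */
    /* The misaligned path maps all 8 bytes and compensates for the host
       pointer's alignment instead; see the switch in the code above. */
}
#endif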
8128
8129
8130#undef LOG_GROUP
8131#define LOG_GROUP LOG_GROUP_IEM
8132
8133/** @} */
8134
8135/** @name Opcode Helpers.
8136 * @{
8137 */
8138
8139/**
8140 * Calculates the effective address of a ModR/M memory operand.
8141 *
8142 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8143 *
8144 * @return Strict VBox status code.
8145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8146 * @param bRm The ModRM byte.
8147 * @param cbImmAndRspOffset - First byte: The size of any immediate
8148 * following the effective address opcode bytes
8149 * (only for RIP relative addressing).
8150 * - Second byte: RSP displacement (for POP [ESP]).
8151 * @param pGCPtrEff Where to return the effective address.
8152 */
8153VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8154{
8155 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8156# define SET_SS_DEF() \
8157 do \
8158 { \
8159 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8160 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8161 } while (0)
8162
8163 if (!IEM_IS_64BIT_CODE(pVCpu))
8164 {
8165/** @todo Check the effective address size crap! */
8166 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8167 {
8168 uint16_t u16EffAddr;
8169
8170 /* Handle the disp16 form with no registers first. */
8171 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8172 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8173 else
8174 {
8175 /* Get the displacement. */
8176 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8177 {
8178 case 0: u16EffAddr = 0; break;
8179 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8180 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8181 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8182 }
8183
8184 /* Add the base and index registers to the disp. */
8185 switch (bRm & X86_MODRM_RM_MASK)
8186 {
8187 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8188 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8189 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8190 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8191 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8192 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8193 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8194 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8195 }
8196 }
8197
8198 *pGCPtrEff = u16EffAddr;
8199 }
8200 else
8201 {
8202 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8203 uint32_t u32EffAddr;
8204
8205 /* Handle the disp32 form with no registers first. */
8206 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8207 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8208 else
8209 {
8210 /* Get the register (or SIB) value. */
8211 switch ((bRm & X86_MODRM_RM_MASK))
8212 {
8213 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8214 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8215 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8216 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8217 case 4: /* SIB */
8218 {
8219 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8220
8221 /* Get the index and scale it. */
8222 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8223 {
8224 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8225 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8226 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8227 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8228 case 4: u32EffAddr = 0; /*none */ break;
8229 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8230 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8231 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8232 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8233 }
8234 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8235
8236 /* add base */
8237 switch (bSib & X86_SIB_BASE_MASK)
8238 {
8239 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8240 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8241 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8242 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8243 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8244 case 5:
8245 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8246 {
8247 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8248 SET_SS_DEF();
8249 }
8250 else
8251 {
8252 uint32_t u32Disp;
8253 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8254 u32EffAddr += u32Disp;
8255 }
8256 break;
8257 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8258 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8259 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8260 }
8261 break;
8262 }
8263 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8264 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8265 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8266 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8267 }
8268
8269 /* Get and add the displacement. */
8270 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8271 {
8272 case 0:
8273 break;
8274 case 1:
8275 {
8276 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8277 u32EffAddr += i8Disp;
8278 break;
8279 }
8280 case 2:
8281 {
8282 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8283 u32EffAddr += u32Disp;
8284 break;
8285 }
8286 default:
8287 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8288 }
8289
8290 }
8291 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8292 *pGCPtrEff = u32EffAddr;
8293 }
8294 }
8295 else
8296 {
8297 uint64_t u64EffAddr;
8298
8299 /* Handle the rip+disp32 form with no registers first. */
8300 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8301 {
8302 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8303 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8304 }
8305 else
8306 {
8307 /* Get the register (or SIB) value. */
8308 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8309 {
8310 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8311 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8312 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8313 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8314 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8315 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8316 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8317 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8318 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8319 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8320 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8321 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8322 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8323 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8324 /* SIB */
8325 case 4:
8326 case 12:
8327 {
8328 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8329
8330 /* Get the index and scale it. */
8331 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8332 {
8333 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8334 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8335 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8336 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8337 case 4: u64EffAddr = 0; /*none */ break;
8338 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8339 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8340 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8341 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8342 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8343 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8344 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8345 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8346 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8347 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8348 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8349 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8350 }
8351 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8352
8353 /* add base */
8354 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8355 {
8356 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8357 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8358 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8359 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8360 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8361 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8362 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8363 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8364 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8365 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8366 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8367 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8368 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8369 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8370 /* complicated encodings */
8371 case 5:
8372 case 13:
8373 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8374 {
8375 if (!pVCpu->iem.s.uRexB)
8376 {
8377 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8378 SET_SS_DEF();
8379 }
8380 else
8381 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8382 }
8383 else
8384 {
8385 uint32_t u32Disp;
8386 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8387 u64EffAddr += (int32_t)u32Disp;
8388 }
8389 break;
8390 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8391 }
8392 break;
8393 }
8394 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8395 }
8396
8397 /* Get and add the displacement. */
8398 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8399 {
8400 case 0:
8401 break;
8402 case 1:
8403 {
8404 int8_t i8Disp;
8405 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8406 u64EffAddr += i8Disp;
8407 break;
8408 }
8409 case 2:
8410 {
8411 uint32_t u32Disp;
8412 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8413 u64EffAddr += (int32_t)u32Disp;
8414 break;
8415 }
8416 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8417 }
8418
8419 }
8420
8421 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8422 *pGCPtrEff = u64EffAddr;
8423 else
8424 {
8425 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8426 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8427 }
8428 }
8429
8430 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8431 return VINF_SUCCESS;
8432}
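
/* A worked example (not built) of the 32-bit ModR/M+SIB decoding above for an
 * operand like [ebx+esi*4+0x10]: ModRM=0x44 (mod=1, rm=4 -> SIB follows),
 * SIB=0xb3 (scale=2, index=6/esi, base=3/ebx), followed by a disp8 of 0x10.
 * The helper is made up and takes the register values directly instead of
 * going through pVCpu->cpum.GstCtx. */
#if 0
static uint32_t iemExampleCalcEffAddr32(uint32_t uEbx, uint32_t uEsi)
{
    uint8_t const bSib   = 0xb3;
    int8_t  const i8Disp = 0x10;

    /* Scale the index register - same shift expression as the code above. */
    uint32_t u32EffAddr = uEsi << ((bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK);
    u32EffAddr += uEbx;     /* add the base register */
    u32EffAddr += i8Disp;   /* mod=1: an 8-bit sign extended displacement follows the SIB byte */
    return u32EffAddr;
}
#endif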
8433
8434
8435#ifdef IEM_WITH_SETJMP
8436/**
8437 * Calculates the effective address of a ModR/M memory operand.
8438 *
8439 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8440 *
8441 * May longjmp on internal error.
8442 *
8443 * @return The effective address.
8444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8445 * @param bRm The ModRM byte.
8446 * @param cbImmAndRspOffset - First byte: The size of any immediate
8447 * following the effective address opcode bytes
8448 * (only for RIP relative addressing).
8449 * - Second byte: RSP displacement (for POP [ESP]).
8450 */
8451RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8452{
8453 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8454# define SET_SS_DEF() \
8455 do \
8456 { \
8457 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8458 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8459 } while (0)
8460
8461 if (!IEM_IS_64BIT_CODE(pVCpu))
8462 {
8463/** @todo Check the effective address size crap! */
8464 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8465 {
8466 uint16_t u16EffAddr;
8467
8468 /* Handle the disp16 form with no registers first. */
8469 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8470 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8471 else
8472 {
8473 /* Get the displacement. */
8474 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8475 {
8476 case 0: u16EffAddr = 0; break;
8477 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8478 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8479 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8480 }
8481
8482 /* Add the base and index registers to the disp. */
8483 switch (bRm & X86_MODRM_RM_MASK)
8484 {
8485 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8486 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8487 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8488 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8489 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8490 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8491 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8492 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8493 }
8494 }
8495
8496 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8497 return u16EffAddr;
8498 }
8499
8500 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8501 uint32_t u32EffAddr;
8502
8503 /* Handle the disp32 form with no registers first. */
8504 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8505 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8506 else
8507 {
8508 /* Get the register (or SIB) value. */
8509 switch ((bRm & X86_MODRM_RM_MASK))
8510 {
8511 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8512 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8513 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8514 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8515 case 4: /* SIB */
8516 {
8517 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8518
8519 /* Get the index and scale it. */
8520 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8521 {
8522 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8523 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8524 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8525 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8526 case 4: u32EffAddr = 0; /*none */ break;
8527 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8528 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8529 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8530 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8531 }
8532 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8533
8534 /* add base */
8535 switch (bSib & X86_SIB_BASE_MASK)
8536 {
8537 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8538 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8539 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8540 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8541 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8542 case 5:
8543 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8544 {
8545 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8546 SET_SS_DEF();
8547 }
8548 else
8549 {
8550 uint32_t u32Disp;
8551 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8552 u32EffAddr += u32Disp;
8553 }
8554 break;
8555 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8556 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8557 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8558 }
8559 break;
8560 }
8561 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8562 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8563 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8564 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8565 }
8566
8567 /* Get and add the displacement. */
8568 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8569 {
8570 case 0:
8571 break;
8572 case 1:
8573 {
8574 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8575 u32EffAddr += i8Disp;
8576 break;
8577 }
8578 case 2:
8579 {
8580 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8581 u32EffAddr += u32Disp;
8582 break;
8583 }
8584 default:
8585 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8586 }
8587 }
8588
8589 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8590 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8591 return u32EffAddr;
8592 }
8593
8594 uint64_t u64EffAddr;
8595
8596 /* Handle the rip+disp32 form with no registers first. */
8597 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8598 {
8599 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8600 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8601 }
8602 else
8603 {
8604 /* Get the register (or SIB) value. */
8605 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8606 {
8607 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8608 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8609 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8610 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8611 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8612 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8613 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8614 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8615 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8616 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8617 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8618 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8619 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8620 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8621 /* SIB */
8622 case 4:
8623 case 12:
8624 {
8625 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8626
8627 /* Get the index and scale it. */
8628 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8629 {
8630 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8631 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8632 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8633 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8634 case 4: u64EffAddr = 0; /*none */ break;
8635 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8636 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8637 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8638 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8639 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8640 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8641 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8642 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8643 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8644 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8645 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8646 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8647 }
8648 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8649
8650 /* add base */
8651 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8652 {
8653 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8654 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8655 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8656 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8657 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8658 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8659 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8660 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8661 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8662 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8663 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8664 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8665 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8666 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8667 /* complicated encodings */
8668 case 5:
8669 case 13:
8670 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8671 {
8672 if (!pVCpu->iem.s.uRexB)
8673 {
8674 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8675 SET_SS_DEF();
8676 }
8677 else
8678 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8679 }
8680 else
8681 {
8682 uint32_t u32Disp;
8683 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8684 u64EffAddr += (int32_t)u32Disp;
8685 }
8686 break;
8687 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8688 }
8689 break;
8690 }
8691 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8692 }
8693
8694 /* Get and add the displacement. */
8695 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8696 {
8697 case 0:
8698 break;
8699 case 1:
8700 {
8701 int8_t i8Disp;
8702 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8703 u64EffAddr += i8Disp;
8704 break;
8705 }
8706 case 2:
8707 {
8708 uint32_t u32Disp;
8709 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8710 u64EffAddr += (int32_t)u32Disp;
8711 break;
8712 }
8713 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8714 }
8715
8716 }
8717
8718 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8719 {
8720 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8721 return u64EffAddr;
8722 }
8723 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8724 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8725 return u64EffAddr & UINT32_MAX;
8726}
8727#endif /* IEM_WITH_SETJMP */
8728
8729
8730/**
8731 * Calculates the effective address of a ModR/M memory operand, extended version
8732 * for use in the recompilers.
8733 *
8734 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8735 *
8736 * @return Strict VBox status code.
8737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8738 * @param bRm The ModRM byte.
8739 * @param cbImmAndRspOffset - First byte: The size of any immediate
8740 * following the effective address opcode bytes
8741 * (only for RIP relative addressing).
8742 * - Second byte: RSP displacement (for POP [ESP]).
8743 * @param pGCPtrEff Where to return the effective address.
8744 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8745 * SIB byte (bits 39:32).
8746 */
8747VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8748{
8749 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8750# define SET_SS_DEF() \
8751 do \
8752 { \
8753 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8754 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8755 } while (0)
8756
8757 uint64_t uInfo;
8758 if (!IEM_IS_64BIT_CODE(pVCpu))
8759 {
8760/** @todo Check the effective address size crap! */
8761 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8762 {
8763 uint16_t u16EffAddr;
8764
8765 /* Handle the disp16 form with no registers first. */
8766 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8767 {
8768 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8769 uInfo = u16EffAddr;
8770 }
8771 else
8772 {
8773 /* Get the displacement. */
8774 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8775 {
8776 case 0: u16EffAddr = 0; break;
8777 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8778 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8779 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8780 }
8781 uInfo = u16EffAddr;
8782
8783 /* Add the base and index registers to the disp. */
8784 switch (bRm & X86_MODRM_RM_MASK)
8785 {
8786 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8787 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8788 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8789 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8790 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8791 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8792 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8793 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8794 }
8795 }
8796
8797 *pGCPtrEff = u16EffAddr;
8798 }
8799 else
8800 {
8801 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8802 uint32_t u32EffAddr;
8803
8804 /* Handle the disp32 form with no registers first. */
8805 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8806 {
8807 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8808 uInfo = u32EffAddr;
8809 }
8810 else
8811 {
8812 /* Get the register (or SIB) value. */
8813 uInfo = 0;
8814 switch ((bRm & X86_MODRM_RM_MASK))
8815 {
8816 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8817 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8818 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8819 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8820 case 4: /* SIB */
8821 {
8822 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8823 uInfo = (uint64_t)bSib << 32;
8824
8825 /* Get the index and scale it. */
8826 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8827 {
8828 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8829 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8830 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8831 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8832 case 4: u32EffAddr = 0; /*none */ break;
8833 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8834 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8835 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8836 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8837 }
8838 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8839
8840 /* add base */
8841 switch (bSib & X86_SIB_BASE_MASK)
8842 {
8843 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8844 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8845 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8846 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8847 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8848 case 5:
8849 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8850 {
8851 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8852 SET_SS_DEF();
8853 }
8854 else
8855 {
8856 uint32_t u32Disp;
8857 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8858 u32EffAddr += u32Disp;
8859 uInfo |= u32Disp;
8860 }
8861 break;
8862 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8863 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8864 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8865 }
8866 break;
8867 }
8868 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8869 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8870 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8871 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8872 }
8873
8874 /* Get and add the displacement. */
8875 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8876 {
8877 case 0:
8878 break;
8879 case 1:
8880 {
8881 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8882 u32EffAddr += i8Disp;
8883 uInfo |= (uint32_t)(int32_t)i8Disp;
8884 break;
8885 }
8886 case 2:
8887 {
8888 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8889 u32EffAddr += u32Disp;
8890 uInfo |= (uint32_t)u32Disp;
8891 break;
8892 }
8893 default:
8894 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8895 }
8896
8897 }
8898 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8899 *pGCPtrEff = u32EffAddr;
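 /* Worked example (illustrative): bRm=0x04 (mod=00, rm=100) selects the SIB path; a SIB
    byte of 0x88 means scale=4 (bits 7:6=10), index=ecx (bits 5:3=001) and base=eax
    (bits 2:0=000), so u32EffAddr = eax + ecx*4 plus any displacement. A base field of
    101 with mod=00 takes the disp32 branch above instead of using EBP. */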
8900 }
8901 }
8902 else
8903 {
8904 uint64_t u64EffAddr;
8905
8906 /* Handle the rip+disp32 form with no registers first. */
8907 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8908 {
8909 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8910 uInfo = (uint32_t)u64EffAddr;
8911 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8912 }
8913 else
8914 {
8915 /* Get the register (or SIB) value. */
8916 uInfo = 0;
8917 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8918 {
8919 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8920 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8921 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8922 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8923 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8924 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8925 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8926 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8927 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8928 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8929 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8930 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8931 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8932 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8933 /* SIB */
8934 case 4:
8935 case 12:
8936 {
8937 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8938 uInfo = (uint64_t)bSib << 32;
8939
8940 /* Get the index and scale it. */
8941 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8942 {
8943 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8944 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8945 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8946 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
 8947 case 4: u64EffAddr = 0; /* none */ break;
8948 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8949 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8950 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8951 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8952 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8953 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8954 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8955 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8956 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8957 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8958 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8959 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8960 }
8961 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8962
8963 /* add base */
8964 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8965 {
8966 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8967 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8968 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8969 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8970 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8971 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8972 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8973 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8974 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8975 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8976 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8977 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8978 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8979 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8980 /* complicated encodings */
8981 case 5:
8982 case 13:
8983 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8984 {
8985 if (!pVCpu->iem.s.uRexB)
8986 {
8987 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8988 SET_SS_DEF();
8989 }
8990 else
8991 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8992 }
8993 else
8994 {
8995 uint32_t u32Disp;
8996 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8997 u64EffAddr += (int32_t)u32Disp;
8998 uInfo |= u32Disp;
8999 }
9000 break;
9001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9002 }
9003 break;
9004 }
9005 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9006 }
9007
9008 /* Get and add the displacement. */
9009 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9010 {
9011 case 0:
9012 break;
9013 case 1:
9014 {
9015 int8_t i8Disp;
9016 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9017 u64EffAddr += i8Disp;
9018 uInfo |= (uint32_t)(int32_t)i8Disp;
9019 break;
9020 }
9021 case 2:
9022 {
9023 uint32_t u32Disp;
9024 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9025 u64EffAddr += (int32_t)u32Disp;
9026 uInfo |= u32Disp;
9027 break;
9028 }
9029 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9030 }
9031
9032 }
9033
9034 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9035 *pGCPtrEff = u64EffAddr;
9036 else
9037 {
9038 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9039 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9040 }
9041 }
9042 *puInfo = uInfo;
9043
9044 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9045 return VINF_SUCCESS;
9046}
9047
9048/** @} */
9049
9050
9051#ifdef LOG_ENABLED
9052/**
9053 * Logs the current instruction.
9054 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9055 * @param fSameCtx Set if we have the same context information as the VMM,
9056 * clear if we may have already executed an instruction in
9057 * our debug context. When clear, we assume IEMCPU holds
9058 * valid CPU mode info.
 9059 * @param pszFunction The IEM function doing the execution.
 9060 *
 9061 * @note The @a fSameCtx parameter is now misleading and obsolete.
9062 */
9063static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9064{
9065# ifdef IN_RING3
9066 if (LogIs2Enabled())
9067 {
9068 char szInstr[256];
9069 uint32_t cbInstr = 0;
9070 if (fSameCtx)
9071 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9072 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9073 szInstr, sizeof(szInstr), &cbInstr);
9074 else
9075 {
9076 uint32_t fFlags = 0;
9077 switch (IEM_GET_CPU_MODE(pVCpu))
9078 {
9079 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9080 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9081 case IEMMODE_16BIT:
9082 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9083 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9084 else
9085 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9086 break;
9087 }
9088 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9089 szInstr, sizeof(szInstr), &cbInstr);
9090 }
9091
9092 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9093 Log2(("**** %s fExec=%x\n"
9094 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9095 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9096 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9097 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9098 " %s\n"
9099 , pszFunction, pVCpu->iem.s.fExec,
9100 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9101 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9102 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9103 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9104 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9105 szInstr));
9106
9107 /* This stuff sucks atm. as it fills the log with MSRs. */
9108 //if (LogIs3Enabled())
9109 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9110 }
9111 else
9112# endif
9113 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9114 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9115 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9116}
9117#endif /* LOG_ENABLED */
9118
9119
9120#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9121/**
9122 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9123 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9124 *
9125 * @returns Modified rcStrict.
9126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9127 * @param rcStrict The instruction execution status.
9128 */
9129static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9130{
9131 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9132 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9133 {
9134 /* VMX preemption timer takes priority over NMI-window exits. */
9135 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9136 {
9137 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9138 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9139 }
9140 /*
9141 * Check remaining intercepts.
9142 *
9143 * NMI-window and Interrupt-window VM-exits.
9144 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9145 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9146 *
9147 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9148 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9149 */
9150 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9151 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9152 && !TRPMHasTrap(pVCpu))
9153 {
9154 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9155 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9156 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9157 {
9158 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9159 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9160 }
9161 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9162 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9163 {
9164 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9165 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9166 }
9167 }
9168 }
9169 /* TPR-below threshold/APIC write has the highest priority. */
9170 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9171 {
9172 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9173 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9174 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9175 }
9176 /* MTF takes priority over VMX-preemption timer. */
9177 else
9178 {
9179 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9180 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9181 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9182 }
9183 return rcStrict;
9184}
9185#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9186
9187
9188/**
9189 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9190 * IEMExecOneWithPrefetchedByPC.
9191 *
9192 * Similar code is found in IEMExecLots.
9193 *
9194 * @return Strict VBox status code.
9195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9196 * @param fExecuteInhibit If set, execute the instruction following CLI,
9197 * POP SS and MOV SS,GR.
9198 * @param pszFunction The calling function name.
9199 */
9200DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9201{
9202 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9203 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9204 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9205 RT_NOREF_PV(pszFunction);
9206
9207#ifdef IEM_WITH_SETJMP
9208 VBOXSTRICTRC rcStrict;
9209 IEM_TRY_SETJMP(pVCpu, rcStrict)
9210 {
9211 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9212 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9213 }
9214 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9215 {
9216 pVCpu->iem.s.cLongJumps++;
9217 }
9218 IEM_CATCH_LONGJMP_END(pVCpu);
9219#else
9220 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9221 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9222#endif
9223 if (rcStrict == VINF_SUCCESS)
9224 pVCpu->iem.s.cInstructions++;
9225 if (pVCpu->iem.s.cActiveMappings > 0)
9226 {
9227 Assert(rcStrict != VINF_SUCCESS);
9228 iemMemRollback(pVCpu);
9229 }
9230 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9231 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9232 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9233
9234//#ifdef DEBUG
9235// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9236//#endif
9237
9238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9239 /*
9240 * Perform any VMX nested-guest instruction boundary actions.
9241 *
9242 * If any of these causes a VM-exit, we must skip executing the next
9243 * instruction (would run into stale page tables). A VM-exit makes sure
 9244 * there is no interrupt-inhibition, so that should ensure we don't go on
 9245 * to try executing the next instruction. Clearing fExecuteInhibit is
9246 * problematic because of the setjmp/longjmp clobbering above.
9247 */
9248 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9249 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9250 || rcStrict != VINF_SUCCESS)
9251 { /* likely */ }
9252 else
9253 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9254#endif
9255
9256 /* Execute the next instruction as well if a cli, pop ss or
9257 mov ss, Gr has just completed successfully. */
9258 if ( fExecuteInhibit
9259 && rcStrict == VINF_SUCCESS
9260 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9261 {
9262 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9263 if (rcStrict == VINF_SUCCESS)
9264 {
9265#ifdef LOG_ENABLED
9266 iemLogCurInstr(pVCpu, false, pszFunction);
9267#endif
9268#ifdef IEM_WITH_SETJMP
9269 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9270 {
9271 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9272 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9273 }
9274 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9275 {
9276 pVCpu->iem.s.cLongJumps++;
9277 }
9278 IEM_CATCH_LONGJMP_END(pVCpu);
9279#else
9280 IEM_OPCODE_GET_FIRST_U8(&b);
9281 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9282#endif
9283 if (rcStrict == VINF_SUCCESS)
9284 {
9285 pVCpu->iem.s.cInstructions++;
9286#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9287 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9288 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9289 { /* likely */ }
9290 else
9291 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9292#endif
9293 }
9294 if (pVCpu->iem.s.cActiveMappings > 0)
9295 {
9296 Assert(rcStrict != VINF_SUCCESS);
9297 iemMemRollback(pVCpu);
9298 }
9299 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9300 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9301 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9302 }
9303 else if (pVCpu->iem.s.cActiveMappings > 0)
9304 iemMemRollback(pVCpu);
9305 /** @todo drop this after we bake this change into RIP advancing. */
9306 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9307 }
9308
9309 /*
9310 * Return value fiddling, statistics and sanity assertions.
9311 */
9312 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9313
9314 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9315 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9316 return rcStrict;
9317}
9318
9319
9320/**
9321 * Execute one instruction.
9322 *
9323 * @return Strict VBox status code.
9324 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9325 */
9326VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9327{
 9328 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9329#ifdef LOG_ENABLED
9330 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9331#endif
9332
9333 /*
9334 * Do the decoding and emulation.
9335 */
9336 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9337 if (rcStrict == VINF_SUCCESS)
9338 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9339 else if (pVCpu->iem.s.cActiveMappings > 0)
9340 iemMemRollback(pVCpu);
9341
9342 if (rcStrict != VINF_SUCCESS)
9343 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9344 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9345 return rcStrict;
9346}
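/*
 * Usage sketch (illustrative only): how a caller on the EMT, e.g. EM, might drive IEMExecOne()
 * for a single instruction. The helper name below is made up for illustration and is not a
 * VirtualBox API.
 */
#if 0
static VBOXSTRICTRC emR3SampleSingleStep(PVMCPUCC pVCpu)
{
    /* Interpret exactly one guest instruction in the current context. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("emR3SampleSingleStep: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif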
9347
9348
9349VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9350{
9351 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9352 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9353 if (rcStrict == VINF_SUCCESS)
9354 {
9355 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9356 if (pcbWritten)
9357 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9358 }
9359 else if (pVCpu->iem.s.cActiveMappings > 0)
9360 iemMemRollback(pVCpu);
9361
9362 return rcStrict;
9363}
9364
9365
9366VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9367 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9368{
9369 VBOXSTRICTRC rcStrict;
9370 if ( cbOpcodeBytes
9371 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9372 {
9373 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9374#ifdef IEM_WITH_CODE_TLB
9375 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9376 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9377 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9378 pVCpu->iem.s.offCurInstrStart = 0;
9379 pVCpu->iem.s.offInstrNextByte = 0;
9380 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9381#else
9382 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9383 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9384#endif
9385 rcStrict = VINF_SUCCESS;
9386 }
9387 else
9388 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9389 if (rcStrict == VINF_SUCCESS)
9390 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9391 else if (pVCpu->iem.s.cActiveMappings > 0)
9392 iemMemRollback(pVCpu);
9393
9394 return rcStrict;
9395}
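/*
 * Usage sketch (illustrative only): feeding already-fetched opcode bytes to the interpreter.
 * The bytes are only used when the guest RIP still matches OpcodeBytesPC; otherwise the normal
 * prefetch path is taken. The helper name and the opcode bytes are examples, not VirtualBox APIs.
 */
#if 0
static VBOXSTRICTRC sampleExecPrefetched(PVMCPUCC pVCpu)
{
    static uint8_t const s_abXorEaxEax[] = { 0x31, 0xc0 }; /* xor eax, eax */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, s_abXorEaxEax, sizeof(s_abXorEaxEax));
}
#endif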
9396
9397
9398VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9399{
9400 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9401 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9402 if (rcStrict == VINF_SUCCESS)
9403 {
9404 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9405 if (pcbWritten)
9406 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9407 }
9408 else if (pVCpu->iem.s.cActiveMappings > 0)
9409 iemMemRollback(pVCpu);
9410
9411 return rcStrict;
9412}
9413
9414
9415VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9416 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9417{
9418 VBOXSTRICTRC rcStrict;
9419 if ( cbOpcodeBytes
9420 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9421 {
9422 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9423#ifdef IEM_WITH_CODE_TLB
9424 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9425 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9426 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9427 pVCpu->iem.s.offCurInstrStart = 0;
9428 pVCpu->iem.s.offInstrNextByte = 0;
9429 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9430#else
9431 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9432 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9433#endif
9434 rcStrict = VINF_SUCCESS;
9435 }
9436 else
9437 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9438 if (rcStrict == VINF_SUCCESS)
9439 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9440 else if (pVCpu->iem.s.cActiveMappings > 0)
9441 iemMemRollback(pVCpu);
9442
9443 return rcStrict;
9444}
9445
9446
9447/**
9448 * For handling split cacheline lock operations when the host has split-lock
9449 * detection enabled.
9450 *
9451 * This will cause the interpreter to disregard the lock prefix and implicit
9452 * locking (xchg).
9453 *
9454 * @returns Strict VBox status code.
9455 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9456 */
9457VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9458{
9459 /*
9460 * Do the decoding and emulation.
9461 */
9462 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9463 if (rcStrict == VINF_SUCCESS)
9464 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9465 else if (pVCpu->iem.s.cActiveMappings > 0)
9466 iemMemRollback(pVCpu);
9467
9468 if (rcStrict != VINF_SUCCESS)
9469 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9470 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9471 return rcStrict;
9472}
9473
9474
9475/**
9476 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9477 * inject a pending TRPM trap.
9478 */
9479VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9480{
9481 Assert(TRPMHasTrap(pVCpu));
9482
9483 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9484 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9485 {
9486 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9487#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9488 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9489 if (fIntrEnabled)
9490 {
9491 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9492 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9493 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9494 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9495 else
9496 {
9497 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9498 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9499 }
9500 }
9501#else
9502 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9503#endif
9504 if (fIntrEnabled)
9505 {
9506 uint8_t u8TrapNo;
9507 TRPMEVENT enmType;
9508 uint32_t uErrCode;
9509 RTGCPTR uCr2;
9510 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9511 AssertRC(rc2);
9512 Assert(enmType == TRPM_HARDWARE_INT);
9513 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9514
9515 TRPMResetTrap(pVCpu);
9516
9517#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9518 /* Injecting an event may cause a VM-exit. */
9519 if ( rcStrict != VINF_SUCCESS
9520 && rcStrict != VINF_IEM_RAISED_XCPT)
9521 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9522#else
9523 NOREF(rcStrict);
9524#endif
9525 }
9526 }
9527
9528 return VINF_SUCCESS;
9529}
9530
9531
9532VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9533{
9534 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9535 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9536 Assert(cMaxInstructions > 0);
9537
9538 /*
9539 * See if there is an interrupt pending in TRPM, inject it if we can.
9540 */
9541 /** @todo What if we are injecting an exception and not an interrupt? Is that
9542 * possible here? For now we assert it is indeed only an interrupt. */
9543 if (!TRPMHasTrap(pVCpu))
9544 { /* likely */ }
9545 else
9546 {
9547 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9548 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9549 { /*likely */ }
9550 else
9551 return rcStrict;
9552 }
9553
9554 /*
9555 * Initial decoder init w/ prefetch, then setup setjmp.
9556 */
9557 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9558 if (rcStrict == VINF_SUCCESS)
9559 {
9560#ifdef IEM_WITH_SETJMP
9561 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9562 IEM_TRY_SETJMP(pVCpu, rcStrict)
9563#endif
9564 {
9565 /*
 9566 * The run loop. We limit ourselves to the caller-specified number of instructions.
9567 */
9568 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9569 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9570 for (;;)
9571 {
9572 /*
9573 * Log the state.
9574 */
9575#ifdef LOG_ENABLED
9576 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9577#endif
9578
9579 /*
9580 * Do the decoding and emulation.
9581 */
9582 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9583 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9584#ifdef VBOX_STRICT
9585 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9586#endif
9587 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9588 {
9589 Assert(pVCpu->iem.s.cActiveMappings == 0);
9590 pVCpu->iem.s.cInstructions++;
9591
9592#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9593 /* Perform any VMX nested-guest instruction boundary actions. */
9594 uint64_t fCpu = pVCpu->fLocalForcedActions;
9595 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9596 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9597 { /* likely */ }
9598 else
9599 {
9600 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9601 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9602 fCpu = pVCpu->fLocalForcedActions;
9603 else
9604 {
9605 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9606 break;
9607 }
9608 }
9609#endif
9610 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9611 {
9612#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9613 uint64_t fCpu = pVCpu->fLocalForcedActions;
9614#endif
9615 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9616 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9617 | VMCPU_FF_TLB_FLUSH
9618 | VMCPU_FF_UNHALT );
9619
9620 if (RT_LIKELY( ( !fCpu
9621 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9622 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9623 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9624 {
9625 if (--cMaxInstructionsGccStupidity > 0)
9626 {
 9627 /* Poll timers every now and then according to the caller's specs. */
9628 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9629 || !TMTimerPollBool(pVM, pVCpu))
9630 {
9631 Assert(pVCpu->iem.s.cActiveMappings == 0);
9632 iemReInitDecoder(pVCpu);
9633 continue;
9634 }
9635 }
9636 }
9637 }
9638 Assert(pVCpu->iem.s.cActiveMappings == 0);
9639 }
9640 else if (pVCpu->iem.s.cActiveMappings > 0)
9641 iemMemRollback(pVCpu);
9642 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9643 break;
9644 }
9645 }
9646#ifdef IEM_WITH_SETJMP
9647 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9648 {
9649 if (pVCpu->iem.s.cActiveMappings > 0)
9650 iemMemRollback(pVCpu);
9651# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9652 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9653# endif
9654 pVCpu->iem.s.cLongJumps++;
9655 }
9656 IEM_CATCH_LONGJMP_END(pVCpu);
9657#endif
9658
9659 /*
9660 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9661 */
9662 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9663 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9664 }
9665 else
9666 {
9667 if (pVCpu->iem.s.cActiveMappings > 0)
9668 iemMemRollback(pVCpu);
9669
9670#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9671 /*
9672 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9673 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9674 */
9675 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9676#endif
9677 }
9678
9679 /*
9680 * Maybe re-enter raw-mode and log.
9681 */
9682 if (rcStrict != VINF_SUCCESS)
9683 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9684 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9685 if (pcInstructions)
9686 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9687 return rcStrict;
9688}
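/*
 * Usage sketch (illustrative only): cPollRate is a power-of-two-minus-one mask (see the assertion
 * at the top of IEMExecLots), so 511 means timer polling is considered roughly every 512
 * instructions. The helper name and the limits are example values.
 */
#if 0
static VBOXSTRICTRC sampleExecBatch(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("sampleExecBatch: %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif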
9689
9690
9691/**
9692 * Interface used by EMExecuteExec, does exit statistics and limits.
9693 *
9694 * @returns Strict VBox status code.
9695 * @param pVCpu The cross context virtual CPU structure.
9696 * @param fWillExit To be defined.
9697 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9698 * @param cMaxInstructions Maximum number of instructions to execute.
9699 * @param cMaxInstructionsWithoutExits
9700 * The max number of instructions without exits.
9701 * @param pStats Where to return statistics.
9702 */
9703VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9704 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9705{
9706 NOREF(fWillExit); /** @todo define flexible exit crits */
9707
9708 /*
9709 * Initialize return stats.
9710 */
9711 pStats->cInstructions = 0;
9712 pStats->cExits = 0;
9713 pStats->cMaxExitDistance = 0;
9714 pStats->cReserved = 0;
9715
9716 /*
9717 * Initial decoder init w/ prefetch, then setup setjmp.
9718 */
9719 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9720 if (rcStrict == VINF_SUCCESS)
9721 {
9722#ifdef IEM_WITH_SETJMP
9723 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9724 IEM_TRY_SETJMP(pVCpu, rcStrict)
9725#endif
9726 {
9727#ifdef IN_RING0
9728 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9729#endif
9730 uint32_t cInstructionSinceLastExit = 0;
9731
9732 /*
 9733 * The run loop. We limit ourselves to the caller-specified number of instructions.
9734 */
9735 PVM pVM = pVCpu->CTX_SUFF(pVM);
9736 for (;;)
9737 {
9738 /*
9739 * Log the state.
9740 */
9741#ifdef LOG_ENABLED
9742 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9743#endif
9744
9745 /*
9746 * Do the decoding and emulation.
9747 */
9748 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9749
9750 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9751 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9752
9753 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9754 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9755 {
9756 pStats->cExits += 1;
9757 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9758 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9759 cInstructionSinceLastExit = 0;
9760 }
9761
9762 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9763 {
9764 Assert(pVCpu->iem.s.cActiveMappings == 0);
9765 pVCpu->iem.s.cInstructions++;
9766 pStats->cInstructions++;
9767 cInstructionSinceLastExit++;
9768
9769#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9770 /* Perform any VMX nested-guest instruction boundary actions. */
9771 uint64_t fCpu = pVCpu->fLocalForcedActions;
9772 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9773 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9774 { /* likely */ }
9775 else
9776 {
9777 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9778 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9779 fCpu = pVCpu->fLocalForcedActions;
9780 else
9781 {
9782 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9783 break;
9784 }
9785 }
9786#endif
9787 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9788 {
9789#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9790 uint64_t fCpu = pVCpu->fLocalForcedActions;
9791#endif
9792 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9793 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9794 | VMCPU_FF_TLB_FLUSH
9795 | VMCPU_FF_UNHALT );
9796 if (RT_LIKELY( ( ( !fCpu
9797 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9798 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9799 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9800 || pStats->cInstructions < cMinInstructions))
9801 {
9802 if (pStats->cInstructions < cMaxInstructions)
9803 {
9804 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9805 {
9806#ifdef IN_RING0
9807 if ( !fCheckPreemptionPending
9808 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9809#endif
9810 {
9811 Assert(pVCpu->iem.s.cActiveMappings == 0);
9812 iemReInitDecoder(pVCpu);
9813 continue;
9814 }
9815#ifdef IN_RING0
9816 rcStrict = VINF_EM_RAW_INTERRUPT;
9817 break;
9818#endif
9819 }
9820 }
9821 }
9822 Assert(!(fCpu & VMCPU_FF_IEM));
9823 }
9824 Assert(pVCpu->iem.s.cActiveMappings == 0);
9825 }
9826 else if (pVCpu->iem.s.cActiveMappings > 0)
9827 iemMemRollback(pVCpu);
9828 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9829 break;
9830 }
9831 }
9832#ifdef IEM_WITH_SETJMP
9833 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9834 {
9835 if (pVCpu->iem.s.cActiveMappings > 0)
9836 iemMemRollback(pVCpu);
9837 pVCpu->iem.s.cLongJumps++;
9838 }
9839 IEM_CATCH_LONGJMP_END(pVCpu);
9840#endif
9841
9842 /*
9843 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9844 */
9845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9847 }
9848 else
9849 {
9850 if (pVCpu->iem.s.cActiveMappings > 0)
9851 iemMemRollback(pVCpu);
9852
9853#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9854 /*
9855 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9856 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9857 */
9858 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9859#endif
9860 }
9861
9862 /*
9863 * Maybe re-enter raw-mode and log.
9864 */
9865 if (rcStrict != VINF_SUCCESS)
9866 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9867 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9868 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9869 return rcStrict;
9870}
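/*
 * Usage sketch (illustrative only): the instruction limits below are arbitrary example values and
 * the helper name is made up; the statistics structure is filled in by IEMExecForExits itself.
 */
#if 0
static VBOXSTRICTRC sampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("sampleExecForExits: ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif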
9871
9872
9873/**
9874 * Injects a trap, fault, abort, software interrupt or external interrupt.
9875 *
9876 * The parameter list matches TRPMQueryTrapAll pretty closely.
9877 *
9878 * @returns Strict VBox status code.
9879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9880 * @param u8TrapNo The trap number.
9881 * @param enmType What type is it (trap/fault/abort), software
9882 * interrupt or hardware interrupt.
9883 * @param uErrCode The error code if applicable.
9884 * @param uCr2 The CR2 value if applicable.
9885 * @param cbInstr The instruction length (only relevant for
9886 * software interrupts).
9887 */
9888VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9889 uint8_t cbInstr)
9890{
9891 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9892#ifdef DBGFTRACE_ENABLED
9893 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9894 u8TrapNo, enmType, uErrCode, uCr2);
9895#endif
9896
9897 uint32_t fFlags;
9898 switch (enmType)
9899 {
9900 case TRPM_HARDWARE_INT:
9901 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9902 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9903 uErrCode = uCr2 = 0;
9904 break;
9905
9906 case TRPM_SOFTWARE_INT:
9907 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9908 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9909 uErrCode = uCr2 = 0;
9910 break;
9911
9912 case TRPM_TRAP:
9913 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9914 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9915 if (u8TrapNo == X86_XCPT_PF)
9916 fFlags |= IEM_XCPT_FLAGS_CR2;
9917 switch (u8TrapNo)
9918 {
9919 case X86_XCPT_DF:
9920 case X86_XCPT_TS:
9921 case X86_XCPT_NP:
9922 case X86_XCPT_SS:
9923 case X86_XCPT_PF:
9924 case X86_XCPT_AC:
9925 case X86_XCPT_GP:
9926 fFlags |= IEM_XCPT_FLAGS_ERR;
9927 break;
9928 }
9929 break;
9930
9931 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9932 }
9933
9934 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9935
9936 if (pVCpu->iem.s.cActiveMappings > 0)
9937 iemMemRollback(pVCpu);
9938
9939 return rcStrict;
9940}
9941
9942
9943/**
9944 * Injects the active TRPM event.
9945 *
9946 * @returns Strict VBox status code.
9947 * @param pVCpu The cross context virtual CPU structure.
9948 */
9949VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9950{
9951#ifndef IEM_IMPLEMENTS_TASKSWITCH
9952 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9953#else
9954 uint8_t u8TrapNo;
9955 TRPMEVENT enmType;
9956 uint32_t uErrCode;
9957 RTGCUINTPTR uCr2;
9958 uint8_t cbInstr;
9959 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9960 if (RT_FAILURE(rc))
9961 return rc;
9962
9963 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9964 * ICEBP \#DB injection as a special case. */
9965 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9966#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9967 if (rcStrict == VINF_SVM_VMEXIT)
9968 rcStrict = VINF_SUCCESS;
9969#endif
9970#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9971 if (rcStrict == VINF_VMX_VMEXIT)
9972 rcStrict = VINF_SUCCESS;
9973#endif
9974 /** @todo Are there any other codes that imply the event was successfully
9975 * delivered to the guest? See @bugref{6607}. */
9976 if ( rcStrict == VINF_SUCCESS
9977 || rcStrict == VINF_IEM_RAISED_XCPT)
9978 TRPMResetTrap(pVCpu);
9979
9980 return rcStrict;
9981#endif
9982}
9983
9984
9985VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9986{
9987 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9988 return VERR_NOT_IMPLEMENTED;
9989}
9990
9991
9992VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9993{
9994 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9995 return VERR_NOT_IMPLEMENTED;
9996}
9997
9998
9999/**
10000 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10001 *
10002 * This API ASSUMES that the caller has already verified that the guest code is
10003 * allowed to access the I/O port. (The I/O port is in the DX register in the
10004 * guest state.)
10005 *
10006 * @returns Strict VBox status code.
10007 * @param pVCpu The cross context virtual CPU structure.
10008 * @param cbValue The size of the I/O port access (1, 2, or 4).
10009 * @param enmAddrMode The addressing mode.
10010 * @param fRepPrefix Indicates whether a repeat prefix is used
10011 * (doesn't matter which for this instruction).
10012 * @param cbInstr The instruction length in bytes.
10013 * @param iEffSeg The effective segment address.
10014 * @param fIoChecked Whether the access to the I/O port has been
10015 * checked or not. It's typically checked in the
10016 * HM scenario.
10017 */
10018VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10019 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10020{
10021 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10022 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10023
10024 /*
10025 * State init.
10026 */
10027 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10028
10029 /*
10030 * Switch orgy for getting to the right handler.
10031 */
10032 VBOXSTRICTRC rcStrict;
10033 if (fRepPrefix)
10034 {
10035 switch (enmAddrMode)
10036 {
10037 case IEMMODE_16BIT:
10038 switch (cbValue)
10039 {
10040 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10041 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10042 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10043 default:
10044 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10045 }
10046 break;
10047
10048 case IEMMODE_32BIT:
10049 switch (cbValue)
10050 {
10051 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10052 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10053 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10054 default:
10055 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10056 }
10057 break;
10058
10059 case IEMMODE_64BIT:
10060 switch (cbValue)
10061 {
10062 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10063 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10064 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10065 default:
10066 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10067 }
10068 break;
10069
10070 default:
10071 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10072 }
10073 }
10074 else
10075 {
10076 switch (enmAddrMode)
10077 {
10078 case IEMMODE_16BIT:
10079 switch (cbValue)
10080 {
10081 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10082 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10083 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10084 default:
10085 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10086 }
10087 break;
10088
10089 case IEMMODE_32BIT:
10090 switch (cbValue)
10091 {
10092 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10093 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10094 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10095 default:
10096 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10097 }
10098 break;
10099
10100 case IEMMODE_64BIT:
10101 switch (cbValue)
10102 {
10103 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10104 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10105 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10106 default:
10107 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10108 }
10109 break;
10110
10111 default:
10112 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10113 }
10114 }
10115
10116 if (pVCpu->iem.s.cActiveMappings)
10117 iemMemRollback(pVCpu);
10118
10119 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10120}
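/*
 * Usage sketch (illustrative only): how HM might forward a "rep outsb" intercept here after it
 * has validated the I/O permission bitmap. The parameter values and the helper name are
 * examples, not VirtualBox APIs.
 */
#if 0
static VBOXSTRICTRC sampleForwardRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif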
10121
10122
10123/**
10124 * Interface for HM and EM for executing string I/O IN (read) instructions.
10125 *
10126 * This API ASSUMES that the caller has already verified that the guest code is
10127 * allowed to access the I/O port. (The I/O port is in the DX register in the
10128 * guest state.)
10129 *
10130 * @returns Strict VBox status code.
10131 * @param pVCpu The cross context virtual CPU structure.
10132 * @param cbValue The size of the I/O port access (1, 2, or 4).
10133 * @param enmAddrMode The addressing mode.
10134 * @param fRepPrefix Indicates whether a repeat prefix is used
10135 * (doesn't matter which for this instruction).
10136 * @param cbInstr The instruction length in bytes.
10137 * @param fIoChecked Whether the access to the I/O port has been
10138 * checked or not. It's typically checked in the
10139 * HM scenario.
10140 */
10141VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10142 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10143{
10144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10145
10146 /*
10147 * State init.
10148 */
10149 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10150
10151 /*
10152 * Switch orgy for getting to the right handler.
10153 */
10154 VBOXSTRICTRC rcStrict;
10155 if (fRepPrefix)
10156 {
10157 switch (enmAddrMode)
10158 {
10159 case IEMMODE_16BIT:
10160 switch (cbValue)
10161 {
10162 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10163 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10164 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10165 default:
10166 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10167 }
10168 break;
10169
10170 case IEMMODE_32BIT:
10171 switch (cbValue)
10172 {
10173 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10174 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10175 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10176 default:
10177 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10178 }
10179 break;
10180
10181 case IEMMODE_64BIT:
10182 switch (cbValue)
10183 {
10184 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10185 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10186 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10187 default:
10188 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10189 }
10190 break;
10191
10192 default:
10193 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10194 }
10195 }
10196 else
10197 {
10198 switch (enmAddrMode)
10199 {
10200 case IEMMODE_16BIT:
10201 switch (cbValue)
10202 {
10203 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10204 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10205 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10206 default:
10207 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10208 }
10209 break;
10210
10211 case IEMMODE_32BIT:
10212 switch (cbValue)
10213 {
10214 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10215 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10216 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10217 default:
10218 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10219 }
10220 break;
10221
10222 case IEMMODE_64BIT:
10223 switch (cbValue)
10224 {
10225 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10226 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10227 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10228 default:
10229 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10230 }
10231 break;
10232
10233 default:
10234 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10235 }
10236 }
10237
10238 if ( pVCpu->iem.s.cActiveMappings == 0
10239 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10240 { /* likely */ }
10241 else
10242 {
10243 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10244 iemMemRollback(pVCpu);
10245 }
10246 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10247}
10248
10249
10250/**
 10251 * Interface for rawmode to execute an OUT (write) instruction.
10252 *
10253 * @returns Strict VBox status code.
10254 * @param pVCpu The cross context virtual CPU structure.
10255 * @param cbInstr The instruction length in bytes.
 10256 * @param u16Port The port to write to.
10257 * @param fImm Whether the port is specified using an immediate operand or
10258 * using the implicit DX register.
10259 * @param cbReg The register size.
10260 *
10261 * @remarks In ring-0 not all of the state needs to be synced in.
10262 */
10263VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10264{
10265 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10266 Assert(cbReg <= 4 && cbReg != 3);
10267
10268 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10269 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10270 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10271 Assert(!pVCpu->iem.s.cActiveMappings);
10272 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10273}
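/* Example (illustrative): "out 80h, al" is a two byte instruction with an immediate port and a
   byte register, i.e. IEMExecDecodedOut(pVCpu, 2, 0x80, true /*fImm*/, 1 /*cbReg*/); the one
   byte "out dx, eax" form would instead pass the port taken from DX, false and 4. */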
10274
10275
10276/**
 10277 * Interface for rawmode to execute an IN (read) instruction.
10278 *
10279 * @returns Strict VBox status code.
10280 * @param pVCpu The cross context virtual CPU structure.
10281 * @param cbInstr The instruction length in bytes.
10282 * @param u16Port The port to read.
10283 * @param fImm Whether the port is specified using an immediate operand or
10284 * using the implicit DX.
10285 * @param cbReg The register size.
10286 */
10287VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10288{
10289 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10290 Assert(cbReg <= 4 && cbReg != 3);
10291
10292 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10293 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10294 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10295 Assert(!pVCpu->iem.s.cActiveMappings);
10296 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10297}
10298
10299
10300/**
10301 * Interface for HM and EM to write to a CRx register.
10302 *
10303 * @returns Strict VBox status code.
10304 * @param pVCpu The cross context virtual CPU structure.
10305 * @param cbInstr The instruction length in bytes.
10306 * @param iCrReg The control register number (destination).
10307 * @param iGReg The general purpose register number (source).
10308 *
10309 * @remarks In ring-0 not all of the state needs to be synced in.
10310 */
10311VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10312{
10313 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10314 Assert(iCrReg < 16);
10315 Assert(iGReg < 16);
10316
10317 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10318 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10319 Assert(!pVCpu->iem.s.cActiveMappings);
10320 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10321}
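/* Example (illustrative): a "mov cr4, rax" intercept (opcode bytes 0F 22 E0, i.e. 3 bytes) could
   be replayed as IEMExecDecodedMovCRxWrite(pVCpu, 3, 4 /*iCrReg*/, X86_GREG_xAX /*iGReg*/). */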
10322
10323
10324/**
10325 * Interface for HM and EM to read from a CRx register.
10326 *
10327 * @returns Strict VBox status code.
10328 * @param pVCpu The cross context virtual CPU structure.
10329 * @param cbInstr The instruction length in bytes.
10330 * @param iGReg The general purpose register number (destination).
10331 * @param iCrReg The control register number (source).
10332 *
10333 * @remarks In ring-0 not all of the state needs to be synced in.
10334 */
10335VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10336{
10337 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10338 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10339 | CPUMCTX_EXTRN_APIC_TPR);
10340 Assert(iCrReg < 16);
10341 Assert(iGReg < 16);
10342
10343 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10345 Assert(!pVCpu->iem.s.cActiveMappings);
10346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10347}
10348
10349
10350/**
10351 * Interface for HM and EM to write to a DRx register.
10352 *
10353 * @returns Strict VBox status code.
10354 * @param pVCpu The cross context virtual CPU structure.
10355 * @param cbInstr The instruction length in bytes.
10356 * @param iDrReg The debug register number (destination).
10357 * @param iGReg The general purpose register number (source).
10358 *
10359 * @remarks In ring-0 not all of the state needs to be synced in.
10360 */
10361VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10362{
10363 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10364 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10365 Assert(iDrReg < 8);
10366 Assert(iGReg < 16);
10367
10368 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10369 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10370 Assert(!pVCpu->iem.s.cActiveMappings);
10371 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10372}
10373
10374
10375/**
10376 * Interface for HM and EM to read from a DRx register.
10377 *
10378 * @returns Strict VBox status code.
10379 * @param pVCpu The cross context virtual CPU structure.
10380 * @param cbInstr The instruction length in bytes.
10381 * @param iGReg The general purpose register number (destination).
10382 * @param iDrReg The debug register number (source).
10383 *
10384 * @remarks In ring-0 not all of the state needs to be synced in.
10385 */
10386VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10387{
10388 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10389 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10390 Assert(iDrReg < 8);
10391 Assert(iGReg < 16);
10392
10393 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10394 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10395 Assert(!pVCpu->iem.s.cActiveMappings);
10396 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10397}
10398
10399
10400/**
10401 * Interface for HM and EM to clear the CR0[TS] bit.
10402 *
10403 * @returns Strict VBox status code.
10404 * @param pVCpu The cross context virtual CPU structure.
10405 * @param cbInstr The instruction length in bytes.
10406 *
10407 * @remarks In ring-0 not all of the state needs to be synced in.
10408 */
10409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10410{
10411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10412
10413 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10415 Assert(!pVCpu->iem.s.cActiveMappings);
10416 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10417}


/**
 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uValue      The value to load into CR0.
 * @param   GCPtrEffDst The guest-linear address if the LMSW instruction has a
 *                      memory operand.  Otherwise pass NIL_RTGCPTR.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
 *
 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @remarks In ring-0 not all of the state needs to be synced in.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the WBINVD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVLPG instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   GCPtrPage   The effective address of the page to invalidate.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
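

/*
 * A minimal, hypothetical caller sketch (not part of the original file),
 * showing that an INVLPG exit handler has to be prepared for the
 * VINF_PGM_SYNC_CR3 informational status documented above.  The handler name
 * and how GCPtrPage is obtained are illustrative assumptions only.
 *
 *     static VBOXSTRICTRC exampleForwardInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
 *     {
 *         VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3, GCPtrPage); // 3 = illustrative instruction length
 *         // VINF_PGM_SYNC_CR3 asks for a paging/TLB resync before resuming the
 *         // guest; a real handler would pass it up rather than treat it as an error.
 *         AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 *         return rcStrict;
 *     }
 */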


/**
 * Interface for HM and EM to emulate the INVPCID instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iEffSeg     The effective segment register.
 * @param   GCPtrDesc   The effective address of the INVPCID descriptor.
 * @param   uType       The invalidation type.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
                                                 uint64_t uType)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the CPUID instruction.
 *
 * @returns Strict VBox status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
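

/*
 * A minimal, hypothetical caller sketch (not part of the original file): the
 * remarks above require RAX and RCX (leaf and sub-leaf) to be present in the
 * guest context before calling, so a CPUID exit handler would first make sure
 * those registers have been imported.  The import helper shown here is an
 * assumption, not a specific VMM API.
 *
 *     static VBOXSTRICTRC exampleForwardCpuid(PVMCPUCC pVCpu)
 *     {
 *         // Assumed helper: import at least the registers CPUID consumes.
 *         exampleImportGuestState(pVCpu, CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
 *         return IEMExecDecodedCpuid(pVCpu, 2); // 2 = length of the 0F A2 encoding
 *     }
 */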


/**
 * Interface for HM and EM to emulate the RDPMC instruction.
 *
 * @returns Strict VBox status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the RDTSC instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the RDTSCP instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  It is recommended to
 *          include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
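

/*
 * A minimal, hypothetical caller sketch (not part of the original file),
 * illustrating the remark above: including CPUMCTX_EXTRN_TSC_AUX in the import
 * mask up front saves the emulation an extra fetch of the MSR.  The import
 * helper shown is an assumption, not a specific VMM API.
 *
 *     static VBOXSTRICTRC exampleForwardRdtscp(PVMCPUCC pVCpu)
 *     {
 *         // Assumed helper: import CR4 and TSC_AUX along with the usual decoded-exec state.
 *         exampleImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
 *         return IEMExecDecodedRdtscp(pVCpu, 3); // 3 = length of the 0F 01 F9 encoding
 *     }
 */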


/**
 * Interface for HM and EM to emulate the RDMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  Requires RCX and
 *          (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the WRMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  Requires RCX, RAX, RDX,
 *          and (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                        | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the MONITOR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 * @remarks ASSUMES the default DS segment and that no segment override prefixes
 *          are used.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the MWAIT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the HLT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
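

/*
 * A minimal, hypothetical caller sketch (not part of the original file): when
 * the guest really halts, the status that comes back is a scheduling code the
 * caller must pass up to EM rather than swallow.  The assumption that the halt
 * is reported as VINF_EM_HALT, and the handler name, are illustrative only.
 *
 *     static VBOXSTRICTRC exampleForwardHlt(PVMCPUCC pVCpu)
 *     {
 *         VBOXSTRICTRC rcStrict = IEMExecDecodedHlt(pVCpu, 1); // HLT is a single-byte opcode (F4)
 *         // Returning the status unmodified lets EM put the EMT to sleep until
 *         // the next interrupt or force flag.
 *         return rcStrict;
 *     }
 */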


/**
 * Checks if IEM is in the process of delivering an event (interrupt or
 * exception).
 *
 * @returns true if we're in the process of raising an interrupt or exception,
 *          false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   puVector    Where to store the vector associated with the
 *                      currently delivered event, optional.
 * @param   pfFlags     Where to store the event delivery flags (see
 *                      IEM_XCPT_FLAGS_XXX), optional.
 * @param   puErr       Where to store the error code associated with the
 *                      event, optional.
 * @param   puCr2       Where to store the CR2 associated with the event,
 *                      optional.
 * @remarks The caller should check the flags to determine if the error code and
 *          CR2 are valid for the event.
 */
VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
{
    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
    if (fRaisingXcpt)
    {
        if (puVector)
            *puVector = pVCpu->iem.s.uCurXcpt;
        if (pfFlags)
            *pfFlags = pVCpu->iem.s.fCurXcpt;
        if (puErr)
            *puErr = pVCpu->iem.s.uCurXcptErr;
        if (puCr2)
            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
    }
    return fRaisingXcpt;
}
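

/*
 * A minimal, hypothetical caller sketch (not part of the original file),
 * following the remark above: only consume the error code and CR2 when the
 * returned flags say they are valid.  The exact flag bit names
 * (IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2) are assumed from the
 * IEM_XCPT_FLAGS_XXX reference and may differ.
 *
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         Log(("Delivering vector %#x\n", uVector));
 *         if (fFlags & IEM_XCPT_FLAGS_ERR)    // error code only valid for some exceptions
 *             Log(("  error code %#x\n", uErr));
 *         if (fFlags & IEM_XCPT_FLAGS_CR2)    // CR2 only valid for page faults
 *             Log(("  CR2 %RX64\n", uCr2));
 *     }
 */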

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index.  For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index.  For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
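

/*
 * A minimal, hypothetical caller sketch (not part of the original file): a
 * ring-3 execution loop could flush the pending bounce-buffer commits once it
 * notices the force flag, roughly like this.  The surrounding loop and where
 * rcStrict comes from are illustrative assumptions only.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 *     // rcStrict now also reflects any PGMPhysWrite commit status, merged by
 *     // iemR3MergeStatus according to EM scheduling priority.
 */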

#endif /* IN_RING3 */
