VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100811

Last change on this file since 100811 was 100811, checked in by vboxsync, 20 months ago:

VMM/IEM: Working on implementing the FLAT mode (64-bit mode and 32-bit FLAT) optimizations. Introduced a special 64-bit FS+GS(+CS) variant so we can deal with it the same way as the flat 32-bit variant; this means lumping CS-prefixed stuff (unlikely) in with FS and GS. We call the FLAT variant for DS, ES, and SS accesses and the other mode for memory accesses via FS, GS and CS. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 478.9 KB
1/* $Id: IEMAll.cpp 100811 2023-08-06 01:54:38Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed, as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 *
91 * The syscall logging level assignments:
92 * - Level 1: DOS and BIOS.
93 * - Level 2: Windows 3.x
94 * - Level 3: Linux.
95 */
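/* Illustrative sketch, not part of the original file: how the level assignments
 * above are typically applied with the standard VBox/log.h macros (Log, Log2,
 * LogFlow, Log4, Log8, ...).  The function name and message contents here are
 * hypothetical:
 *
 *     LogFlow(("iemExampleInstr: enter\n"));                           enter/exit info
 *     Log(("iemExampleInstr: raising #GP(0)\n"));                      level 1: exceptions/major events
 *     Log4(("decode: %04x:%08RX64 mov eax, ebx\n", uSel, uRip));       level 4: mnemonics w/ EIP
 *     Log8(("write: %RGv LB %#x\n", GCPtrDst, cbWrite));               level 8: memory writes
 */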
96
97/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
98#ifdef _MSC_VER
99# pragma warning(disable:4505)
100#endif
101
102
103/*********************************************************************************************************************************
104* Header Files *
105*********************************************************************************************************************************/
106#define LOG_GROUP LOG_GROUP_IEM
107#define VMCPU_INCL_CPUM_GST_CTX
108#include <VBox/vmm/iem.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/apic.h>
111#include <VBox/vmm/pdm.h>
112#include <VBox/vmm/pgm.h>
113#include <VBox/vmm/iom.h>
114#include <VBox/vmm/em.h>
115#include <VBox/vmm/hm.h>
116#include <VBox/vmm/nem.h>
117#include <VBox/vmm/gim.h>
118#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
119# include <VBox/vmm/em.h>
120# include <VBox/vmm/hm_svm.h>
121#endif
122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
123# include <VBox/vmm/hmvmxinline.h>
124#endif
125#include <VBox/vmm/tm.h>
126#include <VBox/vmm/dbgf.h>
127#include <VBox/vmm/dbgftrace.h>
128#include "IEMInternal.h"
129#include <VBox/vmm/vmcc.h>
130#include <VBox/log.h>
131#include <VBox/err.h>
132#include <VBox/param.h>
133#include <VBox/dis.h>
134#include <iprt/asm-math.h>
135#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
136# include <iprt/asm-amd64-x86.h>
137#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
138# include <iprt/asm-arm.h>
139#endif
140#include <iprt/assert.h>
141#include <iprt/string.h>
142#include <iprt/x86.h>
143
144#include "IEMInline.h"
145
146
147/*********************************************************************************************************************************
148* Structures and Typedefs *
149*********************************************************************************************************************************/
150/**
151 * CPU exception classes.
152 */
153typedef enum IEMXCPTCLASS
154{
155 IEMXCPTCLASS_BENIGN,
156 IEMXCPTCLASS_CONTRIBUTORY,
157 IEMXCPTCLASS_PAGE_FAULT,
158 IEMXCPTCLASS_DOUBLE_FAULT
159} IEMXCPTCLASS;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165#if defined(IEM_LOG_MEMORY_WRITES)
166/** What IEM just wrote. */
167uint8_t g_abIemWrote[256];
168/** How much IEM just wrote. */
169size_t g_cbIemWrote;
170#endif
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
177 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
178
179
180/**
181 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
182 * path.
183 *
184 * @returns IEM_F_BRK_PENDING_XXX or zero.
185 * @param pVCpu The cross context virtual CPU structure of the
186 * calling thread.
187 *
188 * @note Don't call directly, use iemCalcExecDbgFlags instead.
189 */
190uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
191{
192 uint32_t fExec = 0;
193
194 /*
195 * Process guest breakpoints.
196 */
197#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
198 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
199 { \
200 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
201 { \
202 case X86_DR7_RW_EO: \
203 fExec |= IEM_F_PENDING_BRK_INSTR; \
204 break; \
205 case X86_DR7_RW_WO: \
206 case X86_DR7_RW_RW: \
207 fExec |= IEM_F_PENDING_BRK_DATA; \
208 break; \
209 case X86_DR7_RW_IO: \
210 fExec |= IEM_F_PENDING_BRK_X86_IO; \
211 break; \
212 } \
213 } \
214 } while (0)
215
216 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
217 if (fGstDr7 & X86_DR7_ENABLED_MASK)
218 {
219 PROCESS_ONE_BP(fGstDr7, 0);
220 PROCESS_ONE_BP(fGstDr7, 1);
221 PROCESS_ONE_BP(fGstDr7, 2);
222 PROCESS_ONE_BP(fGstDr7, 3);
223 }
224
225 /*
226 * Process hypervisor breakpoints.
227 */
228 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
229 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
230 {
231 PROCESS_ONE_BP(fHyperDr7, 0);
232 PROCESS_ONE_BP(fHyperDr7, 1);
233 PROCESS_ONE_BP(fHyperDr7, 2);
234 PROCESS_ONE_BP(fHyperDr7, 3);
235 }
236
237 return fExec;
238}
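/* Worked example (added note, not in the original): with guest DR7 = 0x00010002,
 * G0 is set and the R/W0 field (bits 17:16) is 01b (data writes), so the code
 * above returns IEM_F_PENDING_BRK_DATA.  With DR7 = 0x00000001 (L0 set, R/W0 =
 * 00b, execute only) it returns IEM_F_PENDING_BRK_INSTR instead. */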
239
240
241/**
242 * Initializes the decoder state.
243 *
244 * iemReInitDecoder is mostly a copy of this function.
245 *
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @param fExecOpts Optional execution flags:
249 * - IEM_F_BYPASS_HANDLERS
250 * - IEM_F_X86_DISREGARD_LOCK
251 */
252DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
253{
254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
264
265 /* Execution state: */
266 uint32_t fExec;
267 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
268
269 /* Decoder state: */
270 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
271 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
272 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
273 {
274 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
276 }
277 else
278 {
279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
281 }
282 pVCpu->iem.s.fPrefixes = 0;
283 pVCpu->iem.s.uRexReg = 0;
284 pVCpu->iem.s.uRexB = 0;
285 pVCpu->iem.s.uRexIndex = 0;
286 pVCpu->iem.s.idxPrefix = 0;
287 pVCpu->iem.s.uVex3rdReg = 0;
288 pVCpu->iem.s.uVexLength = 0;
289 pVCpu->iem.s.fEvexStuff = 0;
290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
291#ifdef IEM_WITH_CODE_TLB
292 pVCpu->iem.s.pbInstrBuf = NULL;
293 pVCpu->iem.s.offInstrNextByte = 0;
294 pVCpu->iem.s.offCurInstrStart = 0;
295# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
296 pVCpu->iem.s.offOpcode = 0;
297# endif
298# ifdef VBOX_STRICT
299 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
300 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
301 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
302 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
303# endif
304#else
305 pVCpu->iem.s.offOpcode = 0;
306 pVCpu->iem.s.cbOpcode = 0;
307#endif
308 pVCpu->iem.s.offModRm = 0;
309 pVCpu->iem.s.cActiveMappings = 0;
310 pVCpu->iem.s.iNextMapping = 0;
311 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
312
313#ifdef DBGFTRACE_ENABLED
314 switch (IEM_GET_CPU_MODE(pVCpu))
315 {
316 case IEMMODE_64BIT:
317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
318 break;
319 case IEMMODE_32BIT:
320 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
321 break;
322 case IEMMODE_16BIT:
323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
324 break;
325 }
326#endif
327}
328
329
330/**
331 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
332 *
333 * This is mostly a copy of iemInitDecoder.
334 *
335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
336 */
337DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
338{
339 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
345 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
346 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
347 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
348
349 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
350 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
351 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
352
353 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
354 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
355 pVCpu->iem.s.enmEffAddrMode = enmMode;
356 if (enmMode != IEMMODE_64BIT)
357 {
358 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
359 pVCpu->iem.s.enmEffOpSize = enmMode;
360 }
361 else
362 {
363 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
364 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
365 }
366 pVCpu->iem.s.fPrefixes = 0;
367 pVCpu->iem.s.uRexReg = 0;
368 pVCpu->iem.s.uRexB = 0;
369 pVCpu->iem.s.uRexIndex = 0;
370 pVCpu->iem.s.idxPrefix = 0;
371 pVCpu->iem.s.uVex3rdReg = 0;
372 pVCpu->iem.s.uVexLength = 0;
373 pVCpu->iem.s.fEvexStuff = 0;
374 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
375#ifdef IEM_WITH_CODE_TLB
376 if (pVCpu->iem.s.pbInstrBuf)
377 {
378 uint64_t off = (enmMode == IEMMODE_64BIT
379 ? pVCpu->cpum.GstCtx.rip
380 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
381 - pVCpu->iem.s.uInstrBufPc;
382 if (off < pVCpu->iem.s.cbInstrBufTotal)
383 {
384 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
385 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
386 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
387 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
388 else
389 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
390 }
391 else
392 {
393 pVCpu->iem.s.pbInstrBuf = NULL;
394 pVCpu->iem.s.offInstrNextByte = 0;
395 pVCpu->iem.s.offCurInstrStart = 0;
396 pVCpu->iem.s.cbInstrBuf = 0;
397 pVCpu->iem.s.cbInstrBufTotal = 0;
398 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
399 }
400 }
401 else
402 {
403 pVCpu->iem.s.offInstrNextByte = 0;
404 pVCpu->iem.s.offCurInstrStart = 0;
405 pVCpu->iem.s.cbInstrBuf = 0;
406 pVCpu->iem.s.cbInstrBufTotal = 0;
407# ifdef VBOX_STRICT
408 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
409# endif
410 }
411# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
412 pVCpu->iem.s.offOpcode = 0;
413# endif
414#else /* !IEM_WITH_CODE_TLB */
415 pVCpu->iem.s.cbOpcode = 0;
416 pVCpu->iem.s.offOpcode = 0;
417#endif /* !IEM_WITH_CODE_TLB */
418 pVCpu->iem.s.offModRm = 0;
419 Assert(pVCpu->iem.s.cActiveMappings == 0);
420 pVCpu->iem.s.iNextMapping = 0;
421 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
422 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
423
424#ifdef DBGFTRACE_ENABLED
425 switch (enmMode)
426 {
427 case IEMMODE_64BIT:
428 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
429 break;
430 case IEMMODE_32BIT:
431 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
432 break;
433 case IEMMODE_16BIT:
434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
435 break;
436 }
437#endif
438}
439
440
441
442/**
443 * Prefetch opcodes the first time when starting executing.
444 *
445 * @returns Strict VBox status code.
446 * @param pVCpu The cross context virtual CPU structure of the
447 * calling thread.
448 * @param fExecOpts Optional execution flags:
449 * - IEM_F_BYPASS_HANDLERS
450 * - IEM_F_X86_DISREGARD_LOCK
451 */
452static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
453{
454 iemInitDecoder(pVCpu, fExecOpts);
455
456#ifndef IEM_WITH_CODE_TLB
457 /*
458 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
459 *
460 * First translate CS:rIP to a physical address.
461 *
462 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
463 * all relevant bytes from the first page, as it ASSUMES it's only ever
464 * called for dealing with CS.LIM, page crossing and instructions that
465 * are too long.
466 */
467 uint32_t cbToTryRead;
468 RTGCPTR GCPtrPC;
469 if (IEM_IS_64BIT_CODE(pVCpu))
470 {
471 cbToTryRead = GUEST_PAGE_SIZE;
472 GCPtrPC = pVCpu->cpum.GstCtx.rip;
473 if (IEM_IS_CANONICAL(GCPtrPC))
474 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
475 else
476 return iemRaiseGeneralProtectionFault0(pVCpu);
477 }
478 else
479 {
480 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
481 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
482 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
483 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
484 else
485 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
486 if (cbToTryRead) { /* likely */ }
487 else /* overflowed */
488 {
489 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
490 cbToTryRead = UINT32_MAX;
491 }
492 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
493 Assert(GCPtrPC <= UINT32_MAX);
494 }
495
496 PGMPTWALK Walk;
497 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
498 if (RT_SUCCESS(rc))
499 Assert(Walk.fSucceeded); /* probable. */
500 else
501 {
502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
503# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
506# endif
507 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
508 }
509 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
510 else
511 {
512 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
513# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
516# endif
517 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
518 }
519 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
520 else
521 {
522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
523# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
524 if (Walk.fFailed & PGM_WALKFAIL_EPT)
525 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
526# endif
527 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
528 }
529 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
530 /** @todo Check reserved bits and such stuff. PGM is better at doing
531 * that, so do it when implementing the guest virtual address
532 * TLB... */
533
534 /*
535 * Read the bytes at this address.
536 */
537 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
538 if (cbToTryRead > cbLeftOnPage)
539 cbToTryRead = cbLeftOnPage;
540 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
541 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
542
543 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
547 { /* likely */ }
548 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
549 {
550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
551 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
552 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
553 }
554 else
555 {
556 Log((RT_SUCCESS(rcStrict)
557 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
558 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
560 return rcStrict;
561 }
562 }
563 else
564 {
565 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
566 if (RT_SUCCESS(rc))
567 { /* likely */ }
568 else
569 {
570 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
571 GCPtrPC, GCPhys, cbToTryRead, rc));
572 return rc;
573 }
574 }
575 pVCpu->iem.s.cbOpcode = cbToTryRead;
576#endif /* !IEM_WITH_CODE_TLB */
577 return VINF_SUCCESS;
578}
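/* Worked example (added note, not in the original, for the !IEM_WITH_CODE_TLB
 * path above): in 16-bit code with CS.base=0, CS.limit=0xFFFF and EIP=0x0FFA,
 * the code first computes cbToTryRead = 0xFFFF - 0x0FFA + 1 = 0xF006, then
 * clamps it to the 6 bytes left on the 4 KiB page (and to sizeof(abOpcode)).
 * An instruction crossing the page boundary is completed later by
 * iemOpcodeFetchMoreBytes. */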
579
580
581/**
582 * Invalidates the IEM TLBs.
583 *
584 * This is called internally as well as by PGM when moving GC mappings.
585 *
586 * @param pVCpu The cross context virtual CPU structure of the calling
587 * thread.
588 */
589VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
590{
591#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
592 Log10(("IEMTlbInvalidateAll\n"));
593# ifdef IEM_WITH_CODE_TLB
594 pVCpu->iem.s.cbInstrBufTotal = 0;
595 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
596 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
597 { /* very likely */ }
598 else
599 {
600 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
601 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
602 while (i-- > 0)
603 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
604 }
605# endif
606
607# ifdef IEM_WITH_DATA_TLB
608 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
609 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
610 { /* very likely */ }
611 else
612 {
613 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
614 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
615 while (i-- > 0)
616 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
617 }
618# endif
619#else
620 RT_NOREF(pVCpu);
621#endif
622}
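/* Illustrative sketch, not VirtualBox code: the revision trick used above in a
 * self-contained form.  A full flush is O(1) because bumping the revision makes
 * every stored tag (address bits | revision bits) stale; the entries only need
 * to be scrubbed when the revision counter wraps.  The type, sizes and the
 * increment below are made up for the example (IEM uses IEMTLB_REVISION_INCR). */
#if 0
typedef struct EXAMPLETLB
{
    uint64_t uRevision;                     /* lives in the upper bits of each tag */
    struct { uint64_t uTag; } aEntries[256];
} EXAMPLETLB;

static void exampleTlbFlushAll(EXAMPLETLB *pTlb)
{
    pTlb->uRevision += UINT64_C(1) << 48;   /* arbitrary increment for the example */
    if (pTlb->uRevision != 0)
    { /* very likely: old tags no longer match, since they carry the old revision bits */ }
    else
    {
        /* Rare rollover: restart the revision and clear the tags for real. */
        pTlb->uRevision = UINT64_C(1) << 48;
        for (unsigned i = 0; i < 256; i++)
            pTlb->aEntries[i].uTag = 0;
    }
}
#endif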
623
624
625/**
626 * Invalidates a page in the TLBs.
627 *
628 * @param pVCpu The cross context virtual CPU structure of the calling
629 * thread.
630 * @param GCPtr The address of the page to invalidate
631 * @thread EMT(pVCpu)
632 */
633VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
634{
635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
636 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
637 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
638 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
639 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
640
641# ifdef IEM_WITH_CODE_TLB
642 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
643 {
644 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
645 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
646 pVCpu->iem.s.cbInstrBufTotal = 0;
647 }
648# endif
649
650# ifdef IEM_WITH_DATA_TLB
651 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
652 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
653# endif
654#else
655 NOREF(pVCpu); NOREF(GCPtr);
656#endif
657}
658
659
660#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
661/**
662 * Invalidates both TLBs the slow way following a rollover.
663 *
664 * Worker for IEMTlbInvalidateAllPhysical,
665 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
666 * iemMemMapJmp and others.
667 *
668 * @thread EMT(pVCpu)
669 */
670static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
671{
672 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
673 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
674 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
675
676 unsigned i;
677# ifdef IEM_WITH_CODE_TLB
678 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
679 while (i-- > 0)
680 {
681 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
682 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
683 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
684 }
685# endif
686# ifdef IEM_WITH_DATA_TLB
687 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
688 while (i-- > 0)
689 {
690 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
691 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
692 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
693 }
694# endif
695
696}
697#endif
698
699
700/**
701 * Invalidates the host physical aspects of the IEM TLBs.
702 *
703 * This is called internally as well as by PGM when moving GC mappings.
704 *
705 * @param pVCpu The cross context virtual CPU structure of the calling
706 * thread.
707 * @note Currently not used.
708 */
709VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
710{
711#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
712 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
713 Log10(("IEMTlbInvalidateAllPhysical\n"));
714
715# ifdef IEM_WITH_CODE_TLB
716 pVCpu->iem.s.cbInstrBufTotal = 0;
717# endif
718 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
719 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
720 {
721 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
722 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
723 }
724 else
725 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
726#else
727 NOREF(pVCpu);
728#endif
729}
730
731
732/**
733 * Invalidates the host physical aspects of the IEM TLBs.
734 *
735 * This is called internally as well as by PGM when moving GC mappings.
736 *
737 * @param pVM The cross context VM structure.
738 * @param idCpuCaller The ID of the calling EMT if available to the caller,
739 * otherwise NIL_VMCPUID.
740 *
741 * @remarks Caller holds the PGM lock.
742 */
743VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
744{
745#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
746 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
747 if (pVCpuCaller)
748 VMCPU_ASSERT_EMT(pVCpuCaller);
749 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
750
751 VMCC_FOR_EACH_VMCPU(pVM)
752 {
753# ifdef IEM_WITH_CODE_TLB
754 if (pVCpuCaller == pVCpu)
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756# endif
757
758 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
759 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
760 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
761 { /* likely */}
762 else if (pVCpuCaller == pVCpu)
763 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
764 else
765 {
766 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
767 continue;
768 }
769 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
770 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
771 }
772 VMCC_FOR_EACH_VMCPU_END(pVM);
773
774#else
775 RT_NOREF(pVM, idCpuCaller);
776#endif
777}
778
779
780/**
781 * Flushes the prefetch buffer, light version.
782 */
783void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
784{
785#ifndef IEM_WITH_CODE_TLB
786 pVCpu->iem.s.cbOpcode = cbInstr;
787#else
788 RT_NOREF(pVCpu, cbInstr);
789#endif
790}
791
792
793/**
794 * Flushes the prefetch buffer, heavy version.
795 */
796void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
797{
798#ifndef IEM_WITH_CODE_TLB
799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
800#elif 1
801 pVCpu->iem.s.pbInstrBuf = NULL;
802 pVCpu->iem.s.cbInstrBufTotal = 0;
803 RT_NOREF(cbInstr);
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810
811#ifdef IEM_WITH_CODE_TLB
812
813/**
814 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
815 * failure and jumps (longjmp).
816 *
817 * We end up here for a number of reasons:
818 * - pbInstrBuf isn't yet initialized.
819 * - Advancing beyond the buffer boundary (e.g. cross page).
820 * - Advancing beyond the CS segment limit.
821 * - Fetching from non-mappable page (e.g. MMIO).
822 *
823 * @param pVCpu The cross context virtual CPU structure of the
824 * calling thread.
825 * @param pvDst Where to return the bytes.
826 * @param cbDst Number of bytes to read. A value of zero is
827 * allowed for initializing pbInstrBuf (the
828 * recompiler does this). In this case it is best
829 * to set pbInstrBuf to NULL prior to the call.
830 */
831void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
832{
833# ifdef IN_RING3
834 for (;;)
835 {
836 Assert(cbDst <= 8);
837 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
838
839 /*
840 * We might have a partial buffer match, deal with that first to make the
841 * rest simpler. This is the first part of the cross page/buffer case.
842 */
843 if (pVCpu->iem.s.pbInstrBuf != NULL)
844 {
845 if (offBuf < pVCpu->iem.s.cbInstrBuf)
846 {
847 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
848 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
849 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
850
851 cbDst -= cbCopy;
852 pvDst = (uint8_t *)pvDst + cbCopy;
853 offBuf += cbCopy;
854 pVCpu->iem.s.offInstrNextByte += offBuf;
855 }
856 }
857
858 /*
859 * Check segment limit, figuring how much we're allowed to access at this point.
860 *
861 * We will fault immediately if RIP is past the segment limit / in non-canonical
862 * territory. If we do continue, there are one or more bytes to read before we
863 * end up in trouble and we need to do that first before faulting.
864 */
865 RTGCPTR GCPtrFirst;
866 uint32_t cbMaxRead;
867 if (IEM_IS_64BIT_CODE(pVCpu))
868 {
869 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
870 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
871 { /* likely */ }
872 else
873 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
874 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
875 }
876 else
877 {
878 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
879 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
880 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
881 { /* likely */ }
882 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
883 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
884 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
885 if (cbMaxRead != 0)
886 { /* likely */ }
887 else
888 {
889 /* Overflowed because address is 0 and limit is max. */
890 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
891 cbMaxRead = X86_PAGE_SIZE;
892 }
893 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
894 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
895 if (cbMaxRead2 < cbMaxRead)
896 cbMaxRead = cbMaxRead2;
897 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
898 }
899
900 /*
901 * Get the TLB entry for this piece of code.
902 */
903 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
904 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
905 if (pTlbe->uTag == uTag)
906 {
907 /* likely when executing lots of code, otherwise unlikely */
908# ifdef VBOX_WITH_STATISTICS
909 pVCpu->iem.s.CodeTlb.cTlbHits++;
910# endif
911 }
912 else
913 {
914 pVCpu->iem.s.CodeTlb.cTlbMisses++;
915 PGMPTWALK Walk;
916 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
917 if (RT_FAILURE(rc))
918 {
919#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
920 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
921 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
922#endif
923 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
924 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
925 }
926
927 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
928 Assert(Walk.fSucceeded);
929 pTlbe->uTag = uTag;
930 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
931 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
932 pTlbe->GCPhys = Walk.GCPhys;
933 pTlbe->pbMappingR3 = NULL;
934 }
935
936 /*
937 * Check TLB page table level access flags.
938 */
939 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
940 {
941 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
942 {
943 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
944 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
945 }
946 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
947 {
948 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
949 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
950 }
951 }
952
953 /*
954 * Look up the physical page info if necessary.
955 */
956 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
957 { /* not necessary */ }
958 else
959 {
960 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
961 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
962 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
963 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
964 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
965 { /* likely */ }
966 else
967 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
968 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
969 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
970 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
971 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
972 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
973 }
974
975# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
976 /*
977 * Try do a direct read using the pbMappingR3 pointer.
978 */
979 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
980 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
981 {
982 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
983 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
984 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
985 {
986 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
987 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
988 }
989 else
990 {
991 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
992 if (cbInstr + (uint32_t)cbDst <= 15)
993 {
994 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
995 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
996 }
997 else
998 {
999 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1001 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1002 }
1003 }
1004 if (cbDst <= cbMaxRead)
1005 {
1006 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1007 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1008
1009 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1010 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1011 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1012 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1013 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1014 return;
1015 }
1016 pVCpu->iem.s.pbInstrBuf = NULL;
1017
1018 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1019 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1020 }
1021# else
1022# error "refactor as needed"
1023 /*
1024 * If there is no special read handling, we can read a bit more and
1025 * put it in the prefetch buffer.
1026 */
1027 if ( cbDst < cbMaxRead
1028 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1029 {
1030 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1031 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1032 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1033 { /* likely */ }
1034 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1035 {
1036 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1039 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1040 }
1041 else
1042 {
1043 Log((RT_SUCCESS(rcStrict)
1044 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1045 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1046 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1047 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1048 }
1049 }
1050# endif
1051 /*
1052 * Special read handling, so only read exactly what's needed.
1053 * This is a highly unlikely scenario.
1054 */
1055 else
1056 {
1057 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1058
1059 /* Check instruction length. */
1060 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1061 if (RT_LIKELY(cbInstr + cbDst <= 15))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1067 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1068 }
1069
1070 /* Do the reading. */
1071 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1072 if (cbToRead > 0)
1073 {
1074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1075 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1076 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1077 { /* likely */ }
1078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1083 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1092 }
1093 }
1094
1095 /* Update the state and probably return. */
1096 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1097 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1098 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1099
1100 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1101 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1102 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1103 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1104 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1105 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1106 pVCpu->iem.s.pbInstrBuf = NULL;
1107 if (cbToRead == cbDst)
1108 return;
1109 }
1110
1111 /*
1112 * More to read, loop.
1113 */
1114 cbDst -= cbMaxRead;
1115 pvDst = (uint8_t *)pvDst + cbMaxRead;
1116 }
1117# else /* !IN_RING3 */
1118 RT_NOREF(pvDst, cbDst);
1119 if (pvDst || cbDst)
1120 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1121# endif /* !IN_RING3 */
1122}
1123
1124#else /* !IEM_WITH_CODE_TLB */
1125
1126/**
1127 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1128 * exception if it fails.
1129 *
1130 * @returns Strict VBox status code.
1131 * @param pVCpu The cross context virtual CPU structure of the
1132 * calling thread.
1133 * @param cbMin The minimum number of bytes relative to offOpcode
1134 * that must be read.
1135 */
1136VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1137{
1138 /*
1139 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1140 *
1141 * First translate CS:rIP to a physical address.
1142 */
1143 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1144 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1145 uint8_t const cbLeft = cbOpcode - offOpcode;
1146 Assert(cbLeft < cbMin);
1147 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1148
1149 uint32_t cbToTryRead;
1150 RTGCPTR GCPtrNext;
1151 if (IEM_IS_64BIT_CODE(pVCpu))
1152 {
1153 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1154 if (!IEM_IS_CANONICAL(GCPtrNext))
1155 return iemRaiseGeneralProtectionFault0(pVCpu);
1156 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1157 }
1158 else
1159 {
1160 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1161 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1162 GCPtrNext32 += cbOpcode;
1163 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1164 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1165 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1166 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1167 if (!cbToTryRead) /* overflowed */
1168 {
1169 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1170 cbToTryRead = UINT32_MAX;
1171 /** @todo check out wrapping around the code segment. */
1172 }
1173 if (cbToTryRead < cbMin - cbLeft)
1174 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1175 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1176
1177 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1178 if (cbToTryRead > cbLeftOnPage)
1179 cbToTryRead = cbLeftOnPage;
1180 }
1181
1182 /* Restrict to opcode buffer space.
1183
1184 We're making ASSUMPTIONS here based on work done previously in
1185 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1186 be fetched in case of an instruction crossing two pages. */
1187 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1188 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1189 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1190 { /* likely */ }
1191 else
1192 {
1193 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1194 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1195 return iemRaiseGeneralProtectionFault0(pVCpu);
1196 }
1197
1198 PGMPTWALK Walk;
1199 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1200 if (RT_FAILURE(rc))
1201 {
1202 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1204 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1205 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1206#endif
1207 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1210 {
1211 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1213 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1214 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1215#endif
1216 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1217 }
1218 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1219 {
1220 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1221#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1222 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1223 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1224#endif
1225 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1226 }
1227 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1228 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1229 /** @todo Check reserved bits and such stuff. PGM is better at doing
1230 * that, so do it when implementing the guest virtual address
1231 * TLB... */
1232
1233 /*
1234 * Read the bytes at this address.
1235 *
1236 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1237 * and since PATM should only patch the start of an instruction there
1238 * should be no need to check again here.
1239 */
1240 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1241 {
1242 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1243 cbToTryRead, PGMACCESSORIGIN_IEM);
1244 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1245 { /* likely */ }
1246 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1247 {
1248 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1249 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1250 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1251 }
1252 else
1253 {
1254 Log((RT_SUCCESS(rcStrict)
1255 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1256 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1257 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1258 return rcStrict;
1259 }
1260 }
1261 else
1262 {
1263 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1264 if (RT_SUCCESS(rc))
1265 { /* likely */ }
1266 else
1267 {
1268 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1269 return rc;
1270 }
1271 }
1272 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1273 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1274
1275 return VINF_SUCCESS;
1276}
1277
1278#endif /* !IEM_WITH_CODE_TLB */
1279#ifndef IEM_WITH_SETJMP
1280
1281/**
1282 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1283 *
1284 * @returns Strict VBox status code.
1285 * @param pVCpu The cross context virtual CPU structure of the
1286 * calling thread.
1287 * @param pb Where to return the opcode byte.
1288 */
1289VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1290{
1291 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1292 if (rcStrict == VINF_SUCCESS)
1293 {
1294 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1295 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1296 pVCpu->iem.s.offOpcode = offOpcode + 1;
1297 }
1298 else
1299 *pb = 0;
1300 return rcStrict;
1301}
1302
1303#else /* IEM_WITH_SETJMP */
1304
1305/**
1306 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1307 *
1308 * @returns The opcode byte.
1309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1310 */
1311uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1312{
1313# ifdef IEM_WITH_CODE_TLB
1314 uint8_t u8;
1315 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1316 return u8;
1317# else
1318 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1319 if (rcStrict == VINF_SUCCESS)
1320 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1321 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1322# endif
1323}
1324
1325#endif /* IEM_WITH_SETJMP */
1326
1327#ifndef IEM_WITH_SETJMP
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1334 * @param pu16 Where to return the opcode word.
1335 */
1336VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1337{
1338 uint8_t u8;
1339 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1340 if (rcStrict == VINF_SUCCESS)
1341 *pu16 = (int8_t)u8;
1342 return rcStrict;
1343}
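/* Worked example (added note, not in the original): this and the two variants
 * below rely on plain C sign extension.  If the opcode byte is 0xFE, the
 * (int8_t) cast yields -2, so *pu16 becomes 0xFFFE, *pu32 0xFFFFFFFE and
 * *pu64 0xFFFFFFFFFFFFFFFE respectively. */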
1344
1345
1346/**
1347 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 * @param pu32 Where to return the opcode dword.
1352 */
1353VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1354{
1355 uint8_t u8;
1356 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1357 if (rcStrict == VINF_SUCCESS)
1358 *pu32 = (int8_t)u8;
1359 return rcStrict;
1360}
1361
1362
1363/**
1364 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1368 * @param pu64 Where to return the opcode qword.
1369 */
1370VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1371{
1372 uint8_t u8;
1373 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1374 if (rcStrict == VINF_SUCCESS)
1375 *pu64 = (int8_t)u8;
1376 return rcStrict;
1377}
1378
1379#endif /* !IEM_WITH_SETJMP */
1380
1381
1382#ifndef IEM_WITH_SETJMP
1383
1384/**
1385 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1386 *
1387 * @returns Strict VBox status code.
1388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1389 * @param pu16 Where to return the opcode word.
1390 */
1391VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1392{
1393 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1394 if (rcStrict == VINF_SUCCESS)
1395 {
1396 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1399# else
1400 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1401# endif
1402 pVCpu->iem.s.offOpcode = offOpcode + 2;
1403 }
1404 else
1405 *pu16 = 0;
1406 return rcStrict;
1407}
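/* Worked example (added note, not in the original): the opcode stream is little
 * endian, so with abOpcode[offOpcode] = 0x34 and abOpcode[offOpcode + 1] = 0x12
 * both paths above yield 0x1234: RT_MAKE_U16(0x34, 0x12) == 0x34 | (0x12 << 8),
 * matching the unaligned 16-bit read on little-endian hosts. */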
1408
1409#else /* IEM_WITH_SETJMP */
1410
1411/**
1412 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1413 *
1414 * @returns The opcode word.
1415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1416 */
1417uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1418{
1419# ifdef IEM_WITH_CODE_TLB
1420 uint16_t u16;
1421 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1422 return u16;
1423# else
1424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1425 if (rcStrict == VINF_SUCCESS)
1426 {
1427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1428 pVCpu->iem.s.offOpcode += 2;
1429# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1430 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1431# else
1432 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1433# endif
1434 }
1435 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1436# endif
1437}
1438
1439#endif /* IEM_WITH_SETJMP */
1440
1441#ifndef IEM_WITH_SETJMP
1442
1443/**
1444 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1445 *
1446 * @returns Strict VBox status code.
1447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1448 * @param pu32 Where to return the opcode double word.
1449 */
1450VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1451{
1452 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1453 if (rcStrict == VINF_SUCCESS)
1454 {
1455 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1456 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1457 pVCpu->iem.s.offOpcode = offOpcode + 2;
1458 }
1459 else
1460 *pu32 = 0;
1461 return rcStrict;
1462}
1463
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu64 Where to return the opcode quad word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu64 = 0;
1483 return rcStrict;
1484}
1485
1486#endif /* !IEM_WITH_SETJMP */
1487
1488#ifndef IEM_WITH_SETJMP
1489
1490/**
1491 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1492 *
1493 * @returns Strict VBox status code.
1494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1495 * @param pu32 Where to return the opcode dword.
1496 */
1497VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1498{
1499 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1500 if (rcStrict == VINF_SUCCESS)
1501 {
1502 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1503# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1504 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1505# else
1506 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1507 pVCpu->iem.s.abOpcode[offOpcode + 1],
1508 pVCpu->iem.s.abOpcode[offOpcode + 2],
1509 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1510# endif
1511 pVCpu->iem.s.offOpcode = offOpcode + 4;
1512 }
1513 else
1514 *pu32 = 0;
1515 return rcStrict;
1516}
1517
1518#else /* IEM_WITH_SETJMP */
1519
1520/**
1521 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1522 *
1523 * @returns The opcode dword.
1524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1525 */
1526uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1527{
1528# ifdef IEM_WITH_CODE_TLB
1529 uint32_t u32;
1530 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1531 return u32;
1532# else
1533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1534 if (rcStrict == VINF_SUCCESS)
1535 {
1536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1537 pVCpu->iem.s.offOpcode = offOpcode + 4;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 }
1547 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1548# endif
1549}
1550
1551#endif /* IEM_WITH_SETJMP */
1552
1553#ifndef IEM_WITH_SETJMP
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1557 *
1558 * @returns Strict VBox status code.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1561 */
1562VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1563{
1564 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1565 if (rcStrict == VINF_SUCCESS)
1566 {
1567 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1568 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1569 pVCpu->iem.s.abOpcode[offOpcode + 1],
1570 pVCpu->iem.s.abOpcode[offOpcode + 2],
1571 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573 }
1574 else
1575 *pu64 = 0;
1576 return rcStrict;
1577}
1578
1579
1580/**
1581 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1582 *
1583 * @returns Strict VBox status code.
1584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1585 * @param pu64 Where to return the opcode dword, sign extended to a qword.
1586 */
1587VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1588{
1589 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1590 if (rcStrict == VINF_SUCCESS)
1591 {
1592 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1593 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1594 pVCpu->iem.s.abOpcode[offOpcode + 1],
1595 pVCpu->iem.s.abOpcode[offOpcode + 2],
1596 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1597 pVCpu->iem.s.offOpcode = offOpcode + 4;
1598 }
1599 else
1600 *pu64 = 0;
1601 return rcStrict;
1602}
1603
1604#endif /* !IEM_WITH_SETJMP */
1605
1606#ifndef IEM_WITH_SETJMP
1607
1608/**
1609 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1610 *
1611 * @returns Strict VBox status code.
1612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1613 * @param pu64 Where to return the opcode qword.
1614 */
1615VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1616{
1617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1618 if (rcStrict == VINF_SUCCESS)
1619 {
1620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1621# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1622 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1623# else
1624 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1625 pVCpu->iem.s.abOpcode[offOpcode + 1],
1626 pVCpu->iem.s.abOpcode[offOpcode + 2],
1627 pVCpu->iem.s.abOpcode[offOpcode + 3],
1628 pVCpu->iem.s.abOpcode[offOpcode + 4],
1629 pVCpu->iem.s.abOpcode[offOpcode + 5],
1630 pVCpu->iem.s.abOpcode[offOpcode + 6],
1631 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1632# endif
1633 pVCpu->iem.s.offOpcode = offOpcode + 8;
1634 }
1635 else
1636 *pu64 = 0;
1637 return rcStrict;
1638}
1639
1640#else /* IEM_WITH_SETJMP */
1641
1642/**
1643 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1644 *
1645 * @returns The opcode qword.
1646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1647 */
1648uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1649{
1650# ifdef IEM_WITH_CODE_TLB
1651 uint64_t u64;
1652 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1653 return u64;
1654# else
1655 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1656 if (rcStrict == VINF_SUCCESS)
1657 {
1658 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1659 pVCpu->iem.s.offOpcode = offOpcode + 8;
1660# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1661 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1662# else
1663 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1664 pVCpu->iem.s.abOpcode[offOpcode + 1],
1665 pVCpu->iem.s.abOpcode[offOpcode + 2],
1666 pVCpu->iem.s.abOpcode[offOpcode + 3],
1667 pVCpu->iem.s.abOpcode[offOpcode + 4],
1668 pVCpu->iem.s.abOpcode[offOpcode + 5],
1669 pVCpu->iem.s.abOpcode[offOpcode + 6],
1670 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1671# endif
1672 }
1673 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1674# endif
1675}
1676
1677#endif /* IEM_WITH_SETJMP */
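
/*
 * Minimal usage sketch (the caller below is hypothetical): the inline opcode
 * getters consume bytes from pVCpu->iem.s.abOpcode and presumably fall back to
 * the *Slow / *SlowJmp variants above only once the buffered bytes run out.
 *
 *     uint64_t u64Imm = 0;                                  // hypothetical local
 *     VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64Slow(pVCpu, &u64Imm);
 *     if (rcStrict2 != VINF_SUCCESS)
 *         return rcStrict2;                                 // fetch failed, bail out
 *
 * On failure the output is zeroed and the strict status code is propagated;
 * in the setjmp build IEM_DO_LONGJMP unwinds instead of returning a status.
 */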
1678
1679
1680
1681/** @name Misc Worker Functions.
1682 * @{
1683 */
1684
1685/**
1686 * Gets the exception class for the specified exception vector.
1687 *
1688 * @returns The class of the specified exception.
1689 * @param uVector The exception vector.
1690 */
1691static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1692{
1693 Assert(uVector <= X86_XCPT_LAST);
1694 switch (uVector)
1695 {
1696 case X86_XCPT_DE:
1697 case X86_XCPT_TS:
1698 case X86_XCPT_NP:
1699 case X86_XCPT_SS:
1700 case X86_XCPT_GP:
1701 case X86_XCPT_SX: /* AMD only */
1702 return IEMXCPTCLASS_CONTRIBUTORY;
1703
1704 case X86_XCPT_PF:
1705 case X86_XCPT_VE: /* Intel only */
1706 return IEMXCPTCLASS_PAGE_FAULT;
1707
1708 case X86_XCPT_DF:
1709 return IEMXCPTCLASS_DOUBLE_FAULT;
1710 }
1711 return IEMXCPTCLASS_BENIGN;
1712}
1713
1714
1715/**
1716 * Evaluates how to handle an exception caused during delivery of another event
1717 * (exception / interrupt).
1718 *
1719 * @returns How to handle the recursive exception.
1720 * @param pVCpu The cross context virtual CPU structure of the
1721 * calling thread.
1722 * @param fPrevFlags The flags of the previous event.
1723 * @param uPrevVector The vector of the previous event.
1724 * @param fCurFlags The flags of the current exception.
1725 * @param uCurVector The vector of the current exception.
1726 * @param pfXcptRaiseInfo Where to store additional information about the
1727 * exception condition. Optional.
1728 */
1729VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1730 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1731{
1732 /*
1733 * Only CPU exceptions can be raised while delivering other events; software interrupt
1734 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1735 */
1736 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1737 Assert(pVCpu); RT_NOREF(pVCpu);
1738 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1739
1740 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1741 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1742 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1743 {
1744 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1745 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1746 {
1747 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1748 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1749 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1750 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1751 {
1752 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1753 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1754 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1755 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1756 uCurVector, pVCpu->cpum.GstCtx.cr2));
1757 }
1758 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1759 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1760 {
1761 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1762 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1763 }
1764 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1765 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1766 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1767 {
1768 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1769 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1770 }
1771 }
1772 else
1773 {
1774 if (uPrevVector == X86_XCPT_NMI)
1775 {
1776 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1777 if (uCurVector == X86_XCPT_PF)
1778 {
1779 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1780 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1781 }
1782 }
1783 else if ( uPrevVector == X86_XCPT_AC
1784 && uCurVector == X86_XCPT_AC)
1785 {
1786 enmRaise = IEMXCPTRAISE_CPU_HANG;
1787 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1788 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1789 }
1790 }
1791 }
1792 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1793 {
1794 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1795 if (uCurVector == X86_XCPT_PF)
1796 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1797 }
1798 else
1799 {
1800 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1801 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1802 }
1803
1804 if (pfXcptRaiseInfo)
1805 *pfXcptRaiseInfo = fRaiseInfo;
1806 return enmRaise;
1807}
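
/*
 * Worked examples of the classification above (a sketch; all cases assume the
 * previous event was itself a CPU exception, i.e. IEM_XCPT_FLAGS_T_CPU_XCPT):
 *   - #NP raised while delivering #GP: contributory + contributory -> IEMXCPTRAISE_DOUBLE_FAULT.
 *   - #PF raised while delivering #PF: page fault + page fault     -> IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF.
 *   - #GP raised while delivering #DF: the #DF handler faulted     -> IEMXCPTRAISE_TRIPLE_FAULT.
 *   - #DB raised while delivering anything: benign current vector  -> IEMXCPTRAISE_CURRENT_XCPT (just raise the #DB).
 */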
1808
1809
1810/**
1811 * Enters the CPU shutdown state initiated by a triple fault or other
1812 * unrecoverable conditions.
1813 *
1814 * @returns Strict VBox status code.
1815 * @param pVCpu The cross context virtual CPU structure of the
1816 * calling thread.
1817 */
1818static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1819{
1820 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1821 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1822
1823 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1824 {
1825 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1826 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1827 }
1828
1829 RT_NOREF(pVCpu);
1830 return VINF_EM_TRIPLE_FAULT;
1831}
1832
1833
1834/**
1835 * Validates a new SS segment.
1836 *
1837 * @returns VBox strict status code.
1838 * @param pVCpu The cross context virtual CPU structure of the
1839 * calling thread.
1840 * @param NewSS The new SS selector.
1841 * @param uCpl The CPL to load the stack for.
1842 * @param pDesc Where to return the descriptor.
1843 */
1844static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1845{
1846 /* Null selectors are not allowed (we're not called for dispatching
1847 interrupts with SS=0 in long mode). */
1848 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1849 {
1850 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1851 return iemRaiseTaskSwitchFault0(pVCpu);
1852 }
1853
1854 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1855 if ((NewSS & X86_SEL_RPL) != uCpl)
1856 {
1857 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1858 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1859 }
1860
1861 /*
1862 * Read the descriptor.
1863 */
1864 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1865 if (rcStrict != VINF_SUCCESS)
1866 return rcStrict;
1867
1868 /*
1869 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1870 */
1871 if (!pDesc->Legacy.Gen.u1DescType)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1878 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1879 {
1880 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1881 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1882 }
1883 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1886 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1887 }
1888
1889 /* Is it there? */
1890 /** @todo testcase: Is this checked before the canonical / limit check below? */
1891 if (!pDesc->Legacy.Gen.u1Present)
1892 {
1893 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1895 }
1896
1897 return VINF_SUCCESS;
1898}
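
/*
 * Example of the checks above (a sketch with made-up selector values): loading
 * SS=0x0023 (RPL=3) for uCpl=0 fails the RPL == CPL test, and a writable data
 * descriptor with DPL=3 fails the DPL == CPL test; both raise #TS referencing
 * the selector, while a not-present descriptor raises #NP instead.
 */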
1899
1900/** @} */
1901
1902
1903/** @name Raising Exceptions.
1904 *
1905 * @{
1906 */
1907
1908
1909/**
1910 * Loads the specified stack far pointer from the TSS.
1911 *
1912 * @returns VBox strict status code.
1913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1914 * @param uCpl The CPL to load the stack for.
1915 * @param pSelSS Where to return the new stack segment.
1916 * @param puEsp Where to return the new stack pointer.
1917 */
1918static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1919{
1920 VBOXSTRICTRC rcStrict;
1921 Assert(uCpl < 4);
1922
1923 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1924 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1925 {
1926 /*
1927 * 16-bit TSS (X86TSS16).
1928 */
1929 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1930 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1931 {
1932 uint32_t off = uCpl * 4 + 2;
1933 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1934 {
1935 /** @todo check actual access pattern here. */
1936 uint32_t u32Tmp = 0; /* gcc maybe... */
1937 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1938 if (rcStrict == VINF_SUCCESS)
1939 {
1940 *puEsp = RT_LOWORD(u32Tmp);
1941 *pSelSS = RT_HIWORD(u32Tmp);
1942 return VINF_SUCCESS;
1943 }
1944 }
1945 else
1946 {
1947 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1948 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1949 }
1950 break;
1951 }
1952
1953 /*
1954 * 32-bit TSS (X86TSS32).
1955 */
1956 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1957 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1958 {
1959 uint32_t off = uCpl * 8 + 4;
1960 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1961 {
1962/** @todo check actual access pattern here. */
1963 uint64_t u64Tmp;
1964 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1965 if (rcStrict == VINF_SUCCESS)
1966 {
1967 *puEsp = u64Tmp & UINT32_MAX;
1968 *pSelSS = (RTSEL)(u64Tmp >> 32);
1969 return VINF_SUCCESS;
1970 }
1971 }
1972 else
1973 {
1974 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1975 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1976 }
1977 break;
1978 }
1979
1980 default:
1981 AssertFailed();
1982 rcStrict = VERR_IEM_IPE_4;
1983 break;
1984 }
1985
1986 *puEsp = 0; /* make gcc happy */
1987 *pSelSS = 0; /* make gcc happy */
1988 return rcStrict;
1989}
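
/*
 * Offset arithmetic sketch for the above (values follow the standard TSS
 * layouts): for uCpl=2 a 16-bit TSS is read at offset 2 + 2*4 = 10 (sp2, ss2),
 * while a 32-bit TSS is read at offset 4 + 2*8 = 20, i.e. an 8-byte fetch
 * returning esp2 in the low dword and ss2 in the following word.
 */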
1990
1991
1992/**
1993 * Loads the specified stack pointer from the 64-bit TSS.
1994 *
1995 * @returns VBox strict status code.
1996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1997 * @param uCpl The CPL to load the stack for.
1998 * @param uIst The interrupt stack table index, 0 to use uCpl instead.
1999 * @param puRsp Where to return the new stack pointer.
2000 */
2001static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2002{
2003 Assert(uCpl < 4);
2004 Assert(uIst < 8);
2005 *puRsp = 0; /* make gcc happy */
2006
2007 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2008 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2009
2010 uint32_t off;
2011 if (uIst)
2012 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2013 else
2014 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2015 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2016 {
2017 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2018 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2019 }
2020
2021 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2022}
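
/*
 * Selection sketch for the above: with uIst=0 the stack comes from the TSS
 * rspN field matching uCpl (e.g. uCpl=1 -> rsp1), whereas a non-zero uIst
 * overrides that and picks istN regardless of the CPL (e.g. uIst=3 -> ist3).
 */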
2023
2024
2025/**
2026 * Adjust the CPU state according to the exception being raised.
2027 *
2028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2029 * @param u8Vector The exception that has been raised.
2030 */
2031DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2032{
2033 switch (u8Vector)
2034 {
2035 case X86_XCPT_DB:
2036 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2037 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2038 break;
2039 /** @todo Read the AMD and Intel exception reference... */
2040 }
2041}
2042
2043
2044/**
2045 * Implements exceptions and interrupts for real mode.
2046 *
2047 * @returns VBox strict status code.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 * @param cbInstr The number of bytes to offset rIP by in the return
2050 * address.
2051 * @param u8Vector The interrupt / exception vector number.
2052 * @param fFlags The flags.
2053 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2054 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2055 */
2056static VBOXSTRICTRC
2057iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2058 uint8_t cbInstr,
2059 uint8_t u8Vector,
2060 uint32_t fFlags,
2061 uint16_t uErr,
2062 uint64_t uCr2) RT_NOEXCEPT
2063{
2064 NOREF(uErr); NOREF(uCr2);
2065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2066
2067 /*
2068 * Read the IDT entry.
2069 */
2070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2071 {
2072 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2074 }
2075 RTFAR16 Idte;
2076 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2077 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2078 {
2079 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2080 return rcStrict;
2081 }
2082
2083 /*
2084 * Push the stack frame.
2085 */
2086 uint16_t *pu16Frame;
2087 uint64_t uNewRsp;
2088 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2089 if (rcStrict != VINF_SUCCESS)
2090 return rcStrict;
2091
2092 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2093#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2094 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2095 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2096 fEfl |= UINT16_C(0xf000);
2097#endif
2098 pu16Frame[2] = (uint16_t)fEfl;
2099 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2100 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2101 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2102 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2103 return rcStrict;
2104
2105 /*
2106 * Load the vector address into cs:ip and make exception specific state
2107 * adjustments.
2108 */
2109 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2110 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2111 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2112 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2113 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2114 pVCpu->cpum.GstCtx.rip = Idte.off;
2115 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2116 IEMMISC_SET_EFL(pVCpu, fEfl);
2117
2118 /** @todo do we actually do this in real mode? */
2119 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2120 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2121
2122 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK bits don't really change here,
2123 so best leave them alone in case we're in a weird kind of real mode... */
2124
2125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2126}
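
/*
 * Worked example for the real-mode path above (a sketch): for u8Vector=0x08
 * the 4-byte IVT entry is read at idtr.pIdt + 0x20, a 6-byte frame of
 * FLAGS/CS/IP is pushed, and execution continues at Idte.sel:Idte.off with
 * cs.u64Base = Idte.sel << 4; IF, TF and AC are cleared in EFLAGS.
 */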
2127
2128
2129/**
2130 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2131 *
2132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2133 * @param pSReg Pointer to the segment register.
2134 */
2135DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2136{
2137 pSReg->Sel = 0;
2138 pSReg->ValidSel = 0;
2139 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2140 {
2141 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2142 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2143 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2144 }
2145 else
2146 {
2147 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2148 /** @todo check this on AMD-V */
2149 pSReg->u64Base = 0;
2150 pSReg->u32Limit = 0;
2151 }
2152}
2153
2154
2155/**
2156 * Loads a segment selector during a task switch in V8086 mode.
2157 *
2158 * @param pSReg Pointer to the segment register.
2159 * @param uSel The selector value to load.
2160 */
2161DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2162{
2163 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2164 pSReg->Sel = uSel;
2165 pSReg->ValidSel = uSel;
2166 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2167 pSReg->u64Base = uSel << 4;
2168 pSReg->u32Limit = 0xffff;
2169 pSReg->Attr.u = 0xf3;
2170}
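
/*
 * Example for the V8086 load above: uSel=0x1234 yields u64Base=0x12340,
 * u32Limit=0xffff and Attr=0xf3 (present, DPL=3, accessed read/write data);
 * see the Intel spec. 26.3.1.2 reference above.
 */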
2171
2172
2173/**
2174 * Loads a segment selector during a task switch in protected mode.
2175 *
2176 * In this task switch scenario, we would throw \#TS exceptions rather than
2177 * \#GPs.
2178 *
2179 * @returns VBox strict status code.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param pSReg Pointer to the segment register.
2182 * @param uSel The new selector value.
2183 *
2184 * @remarks This does _not_ handle CS or SS.
2185 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2186 */
2187static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2188{
2189 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2190
2191 /* Null data selector. */
2192 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2193 {
2194 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2197 return VINF_SUCCESS;
2198 }
2199
2200 /* Fetch the descriptor. */
2201 IEMSELDESC Desc;
2202 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2203 if (rcStrict != VINF_SUCCESS)
2204 {
2205 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2206 VBOXSTRICTRC_VAL(rcStrict)));
2207 return rcStrict;
2208 }
2209
2210 /* Must be a data segment or readable code segment. */
2211 if ( !Desc.Legacy.Gen.u1DescType
2212 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2213 {
2214 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2215 Desc.Legacy.Gen.u4Type));
2216 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2217 }
2218
2219 /* Check privileges for data segments and non-conforming code segments. */
2220 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2221 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2222 {
2223 /* The RPL and the new CPL must be less than or equal to the DPL. */
2224 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2225 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2228 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2229 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2230 }
2231 }
2232
2233 /* Is it there? */
2234 if (!Desc.Legacy.Gen.u1Present)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2237 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2238 }
2239
2240 /* The base and limit. */
2241 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2242 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2243
2244 /*
2245 * Ok, everything checked out fine. Now set the accessed bit before
2246 * committing the result into the registers.
2247 */
2248 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2249 {
2250 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2254 }
2255
2256 /* Commit */
2257 pSReg->Sel = uSel;
2258 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2259 pSReg->u32Limit = cbLimit;
2260 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2261 pSReg->ValidSel = uSel;
2262 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2263 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2264 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2265
2266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2268 return VINF_SUCCESS;
2269}
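
/*
 * Privilege-check example for the above (hypothetical values): loading a data
 * selector with RPL=3 whose descriptor has DPL=1 fails the RPL <= DPL test and
 * raises #TS with the selector (sans RPL) as the error code; a conforming code
 * segment skips that privilege check entirely.
 */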
2270
2271
2272/**
2273 * Performs a task switch.
2274 *
2275 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2276 * caller is responsible for performing the necessary checks (like DPL, TSS
2277 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2278 * reference for JMP, CALL, IRET.
2279 *
2280 * If the task switch is due to a software interrupt or hardware exception,
2281 * the caller is responsible for validating the TSS selector and descriptor. See
2282 * Intel Instruction reference for INT n.
2283 *
2284 * @returns VBox strict status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param enmTaskSwitch The cause of the task switch.
2287 * @param uNextEip The EIP effective after the task switch.
2288 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2289 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2290 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2291 * @param SelTSS The TSS selector of the new task.
2292 * @param pNewDescTSS Pointer to the new TSS descriptor.
2293 */
2294VBOXSTRICTRC
2295iemTaskSwitch(PVMCPUCC pVCpu,
2296 IEMTASKSWITCH enmTaskSwitch,
2297 uint32_t uNextEip,
2298 uint32_t fFlags,
2299 uint16_t uErr,
2300 uint64_t uCr2,
2301 RTSEL SelTSS,
2302 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2303{
2304 Assert(!IEM_IS_REAL_MODE(pVCpu));
2305 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2306 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2307
2308 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2309 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2310 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2311 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2312 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2313
2314 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2315 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2316
2317 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2318 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2319
2320 /* Update CR2 in case it's a page-fault. */
2321 /** @todo This should probably be done much earlier in IEM/PGM. See
2322 * @bugref{5653#c49}. */
2323 if (fFlags & IEM_XCPT_FLAGS_CR2)
2324 pVCpu->cpum.GstCtx.cr2 = uCr2;
2325
2326 /*
2327 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2328 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2329 */
2330 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2331 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2332 if (uNewTSSLimit < uNewTSSLimitMin)
2333 {
2334 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2335 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2336 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2337 }
2338
2339 /*
2340 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2341 * The new TSS must have been read and validated (DPL, limits etc.) before a
2342 * task-switch VM-exit commences.
2343 *
2344 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2345 */
2346 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2347 {
2348 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2349 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2350 }
2351
2352 /*
2353 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2354 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2355 */
2356 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2357 {
2358 uint32_t const uExitInfo1 = SelTSS;
2359 uint32_t uExitInfo2 = uErr;
2360 switch (enmTaskSwitch)
2361 {
2362 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2363 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2364 default: break;
2365 }
2366 if (fFlags & IEM_XCPT_FLAGS_ERR)
2367 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2368 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2369 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2370
2371 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2372 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2373 RT_NOREF2(uExitInfo1, uExitInfo2);
2374 }
2375
2376 /*
2377 * Check the current TSS limit. The last bytes written to the current TSS during the
2378 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2379 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2380 *
2381 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2382 * end up with smaller than "legal" TSS limits.
2383 */
2384 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2385 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2386 if (uCurTSSLimit < uCurTSSLimitMin)
2387 {
2388 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2389 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2391 }
2392
2393 /*
2394 * Verify that the new TSS can be accessed and map it. Map only the required contents
2395 * and not the entire TSS.
2396 */
2397 void *pvNewTSS;
2398 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2399 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2400 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2401 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2402 * not perform correct translation if this happens. See Intel spec. 7.2.1
2403 * "Task-State Segment". */
2404 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2405 if (rcStrict != VINF_SUCCESS)
2406 {
2407 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2408 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2409 return rcStrict;
2410 }
2411
2412 /*
2413 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2414 */
2415 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2416 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2417 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2418 {
2419 PX86DESC pDescCurTSS;
2420 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2421 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2422 if (rcStrict != VINF_SUCCESS)
2423 {
2424 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2425 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2426 return rcStrict;
2427 }
2428
2429 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2430 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2431 if (rcStrict != VINF_SUCCESS)
2432 {
2433 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2434 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2435 return rcStrict;
2436 }
2437
2438 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2439 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2442 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2443 fEFlags &= ~X86_EFL_NT;
2444 }
2445 }
2446
2447 /*
2448 * Save the CPU state into the current TSS.
2449 */
2450 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2451 if (GCPtrNewTSS == GCPtrCurTSS)
2452 {
2453 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2454 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2455 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2456 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2457 pVCpu->cpum.GstCtx.ldtr.Sel));
2458 }
2459 if (fIsNewTSS386)
2460 {
2461 /*
2462 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2463 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2464 */
2465 void *pvCurTSS32;
2466 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2467 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2468 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2469 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2470 if (rcStrict != VINF_SUCCESS)
2471 {
2472 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2473 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2474 return rcStrict;
2475 }
2476
2477 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2478 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2479 pCurTSS32->eip = uNextEip;
2480 pCurTSS32->eflags = fEFlags;
2481 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2482 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2483 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2484 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2485 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2486 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2487 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2488 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2489 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2490 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2491 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2492 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2493 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2494 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2495
2496 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2497 if (rcStrict != VINF_SUCCESS)
2498 {
2499 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2500 VBOXSTRICTRC_VAL(rcStrict)));
2501 return rcStrict;
2502 }
2503 }
2504 else
2505 {
2506 /*
2507 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2508 */
2509 void *pvCurTSS16;
2510 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2511 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2512 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2513 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2514 if (rcStrict != VINF_SUCCESS)
2515 {
2516 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2517 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2518 return rcStrict;
2519 }
2520
2521 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2522 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2523 pCurTSS16->ip = uNextEip;
2524 pCurTSS16->flags = (uint16_t)fEFlags;
2525 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2526 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2527 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2528 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2529 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2530 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2531 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2532 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2533 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2534 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2535 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2536 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2537
2538 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2539 if (rcStrict != VINF_SUCCESS)
2540 {
2541 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2542 VBOXSTRICTRC_VAL(rcStrict)));
2543 return rcStrict;
2544 }
2545 }
2546
2547 /*
2548 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2549 */
2550 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2551 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2552 {
2553 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2554 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2555 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2556 }
2557
2558 /*
2559 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2560 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2561 */
2562 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2563 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2564 bool fNewDebugTrap;
2565 if (fIsNewTSS386)
2566 {
2567 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2568 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2569 uNewEip = pNewTSS32->eip;
2570 uNewEflags = pNewTSS32->eflags;
2571 uNewEax = pNewTSS32->eax;
2572 uNewEcx = pNewTSS32->ecx;
2573 uNewEdx = pNewTSS32->edx;
2574 uNewEbx = pNewTSS32->ebx;
2575 uNewEsp = pNewTSS32->esp;
2576 uNewEbp = pNewTSS32->ebp;
2577 uNewEsi = pNewTSS32->esi;
2578 uNewEdi = pNewTSS32->edi;
2579 uNewES = pNewTSS32->es;
2580 uNewCS = pNewTSS32->cs;
2581 uNewSS = pNewTSS32->ss;
2582 uNewDS = pNewTSS32->ds;
2583 uNewFS = pNewTSS32->fs;
2584 uNewGS = pNewTSS32->gs;
2585 uNewLdt = pNewTSS32->selLdt;
2586 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2587 }
2588 else
2589 {
2590 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2591 uNewCr3 = 0;
2592 uNewEip = pNewTSS16->ip;
2593 uNewEflags = pNewTSS16->flags;
2594 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2595 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2596 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2597 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2598 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2599 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2600 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2601 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2602 uNewES = pNewTSS16->es;
2603 uNewCS = pNewTSS16->cs;
2604 uNewSS = pNewTSS16->ss;
2605 uNewDS = pNewTSS16->ds;
2606 uNewFS = 0;
2607 uNewGS = 0;
2608 uNewLdt = pNewTSS16->selLdt;
2609 fNewDebugTrap = false;
2610 }
2611
2612 if (GCPtrNewTSS == GCPtrCurTSS)
2613 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2614 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2615
2616 /*
2617 * We're done accessing the new TSS.
2618 */
2619 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2620 if (rcStrict != VINF_SUCCESS)
2621 {
2622 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2623 return rcStrict;
2624 }
2625
2626 /*
2627 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2628 */
2629 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2630 {
2631 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2632 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2636 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* Check that the descriptor indicates the new TSS is available (not busy). */
2641 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2642 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2643 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2644
2645 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2646 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2647 if (rcStrict != VINF_SUCCESS)
2648 {
2649 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2650 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2651 return rcStrict;
2652 }
2653 }
2654
2655 /*
2656 * From this point on, we're technically in the new task. We will defer raising exceptions
2657 * until the task switch has completed, but deliver them before any instruction executes in the new task.
2658 */
2659 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2660 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2661 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2662 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2663 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2664 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2665 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2666
2667 /* Set the busy bit in TR. */
2668 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2669
2670 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2671 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2672 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2673 {
2674 uNewEflags |= X86_EFL_NT;
2675 }
2676
2677 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2678 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2679 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2680
2681 pVCpu->cpum.GstCtx.eip = uNewEip;
2682 pVCpu->cpum.GstCtx.eax = uNewEax;
2683 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2684 pVCpu->cpum.GstCtx.edx = uNewEdx;
2685 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2686 pVCpu->cpum.GstCtx.esp = uNewEsp;
2687 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2688 pVCpu->cpum.GstCtx.esi = uNewEsi;
2689 pVCpu->cpum.GstCtx.edi = uNewEdi;
2690
2691 uNewEflags &= X86_EFL_LIVE_MASK;
2692 uNewEflags |= X86_EFL_RA1_MASK;
2693 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2694
2695 /*
2696 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2697 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2698 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2699 */
2700 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2701 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2704 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2705
2706 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2707 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2708
2709 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2710 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2711
2712 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2713 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2714
2715 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2716 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2717 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2718
2719 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2720 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2721 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2722 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2723
2724 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2725 {
2726 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2727 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2728 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2729 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2730 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2731 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2732 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2733 }
2734
2735 /*
2736 * Switch CR3 for the new task.
2737 */
2738 if ( fIsNewTSS386
2739 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2740 {
2741 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2742 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2743 AssertRCSuccessReturn(rc, rc);
2744
2745 /* Inform PGM. */
2746 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2747 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2748 AssertRCReturn(rc, rc);
2749 /* ignore informational status codes */
2750
2751 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2752 }
2753
2754 /*
2755 * Switch LDTR for the new task.
2756 */
2757 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2758 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2759 else
2760 {
2761 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2762
2763 IEMSELDESC DescNewLdt;
2764 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2765 if (rcStrict != VINF_SUCCESS)
2766 {
2767 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2768 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2769 return rcStrict;
2770 }
2771 if ( !DescNewLdt.Legacy.Gen.u1Present
2772 || DescNewLdt.Legacy.Gen.u1DescType
2773 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2774 {
2775 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2776 uNewLdt, DescNewLdt.Legacy.u));
2777 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2781 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2782 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2783 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2784 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2786 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2787 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2788 }
2789
2790 IEMSELDESC DescSS;
2791 if (IEM_IS_V86_MODE(pVCpu))
2792 {
2793 IEM_SET_CPL(pVCpu, 3);
2794 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2795 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2796 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2797 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2798 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2799 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2800
2801 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2802 DescSS.Legacy.u = 0;
2803 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2804 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2805 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2806 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2807 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2808 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2809 DescSS.Legacy.Gen.u2Dpl = 3;
2810 }
2811 else
2812 {
2813 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2814
2815 /*
2816 * Load the stack segment for the new task.
2817 */
2818 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2819 {
2820 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2822 }
2823
2824 /* Fetch the descriptor. */
2825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2829 VBOXSTRICTRC_VAL(rcStrict)));
2830 return rcStrict;
2831 }
2832
2833 /* SS must be a data segment and writable. */
2834 if ( !DescSS.Legacy.Gen.u1DescType
2835 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2836 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2837 {
2838 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2839 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2841 }
2842
2843 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2844 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2845 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2846 {
2847 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2848 uNewCpl));
2849 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2850 }
2851
2852 /* Is it there? */
2853 if (!DescSS.Legacy.Gen.u1Present)
2854 {
2855 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2856 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2857 }
2858
2859 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2860 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2861
2862 /* Set the accessed bit before committing the result into SS. */
2863 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2864 {
2865 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2866 if (rcStrict != VINF_SUCCESS)
2867 return rcStrict;
2868 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2869 }
2870
2871 /* Commit SS. */
2872 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2873 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2874 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2875 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2876 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2877 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2878 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2879
2880 /* CPL has changed, update IEM before loading rest of segments. */
2881 IEM_SET_CPL(pVCpu, uNewCpl);
2882
2883 /*
2884 * Load the data segments for the new task.
2885 */
2886 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2887 if (rcStrict != VINF_SUCCESS)
2888 return rcStrict;
2889 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2890 if (rcStrict != VINF_SUCCESS)
2891 return rcStrict;
2892 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2893 if (rcStrict != VINF_SUCCESS)
2894 return rcStrict;
2895 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2896 if (rcStrict != VINF_SUCCESS)
2897 return rcStrict;
2898
2899 /*
2900 * Load the code segment for the new task.
2901 */
2902 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2903 {
2904 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2905 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2906 }
2907
2908 /* Fetch the descriptor. */
2909 IEMSELDESC DescCS;
2910 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2911 if (rcStrict != VINF_SUCCESS)
2912 {
2913 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2914 return rcStrict;
2915 }
2916
2917 /* CS must be a code segment. */
2918 if ( !DescCS.Legacy.Gen.u1DescType
2919 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2920 {
2921 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2922 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2923 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2924 }
2925
2926 /* For conforming CS, DPL must be less than or equal to the RPL. */
2927 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2928 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2929 {
2930 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2931 DescCS.Legacy.Gen.u2Dpl));
2932 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2933 }
2934
2935 /* For non-conforming CS, DPL must match RPL. */
2936 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2937 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2938 {
2939 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2940 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 /* Is it there? */
2945 if (!DescCS.Legacy.Gen.u1Present)
2946 {
2947 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2948 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2949 }
2950
2951 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2952 u64Base = X86DESC_BASE(&DescCS.Legacy);
2953
2954 /* Set the accessed bit before committing the result into CS. */
2955 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2956 {
2957 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2958 if (rcStrict != VINF_SUCCESS)
2959 return rcStrict;
2960 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2961 }
2962
2963 /* Commit CS. */
2964 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2965 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2966 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2967 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2968 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2969 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2971 }
2972
2973 /* Make sure the CPU mode is correct. */
2974 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2975 if (fExecNew != pVCpu->iem.s.fExec)
2976 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2977 pVCpu->iem.s.fExec = fExecNew;
2978
2979 /** @todo Debug trap. */
2980 if (fIsNewTSS386 && fNewDebugTrap)
2981 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2982
2983 /*
2984 * Construct the error code masks based on what caused this task switch.
2985 * See Intel Instruction reference for INT.
2986 */
2987 uint16_t uExt;
2988 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2989 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2990 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2991 uExt = 1;
2992 else
2993 uExt = 0;
2994
2995 /*
2996 * Push any error code on to the new stack.
2997 */
2998 if (fFlags & IEM_XCPT_FLAGS_ERR)
2999 {
3000 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3001 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3002 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3003
3004 /* Check that there is sufficient space on the stack. */
3005 /** @todo Factor out segment limit checking for normal/expand down segments
3006 * into a separate function. */
3007 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3008 {
3009 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3010 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3011 {
3012 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3013 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3014 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3015 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3016 }
3017 }
3018 else
3019 {
3020 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3021 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3022 {
3023 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3024 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3025 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3026 }
3027 }
3028
3029
3030 if (fIsNewTSS386)
3031 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3032 else
3033 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3034 if (rcStrict != VINF_SUCCESS)
3035 {
3036 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3037 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3038 return rcStrict;
3039 }
3040 }
3041
3042 /* Check the new EIP against the new CS limit. */
3043 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3044 {
3045 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3046 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3047 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3048 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3049 }
3050
3051 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3052 pVCpu->cpum.GstCtx.ss.Sel));
3053 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3054}
3055
3056
3057/**
3058 * Implements exceptions and interrupts for protected mode.
3059 *
3060 * @returns VBox strict status code.
3061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3062 * @param cbInstr The number of bytes to offset rIP by in the return
3063 * address.
3064 * @param u8Vector The interrupt / exception vector number.
3065 * @param fFlags The flags.
3066 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3067 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3068 */
3069static VBOXSTRICTRC
3070iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3071 uint8_t cbInstr,
3072 uint8_t u8Vector,
3073 uint32_t fFlags,
3074 uint16_t uErr,
3075 uint64_t uCr2) RT_NOEXCEPT
3076{
3077 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3078
3079 /*
3080 * Read the IDT entry.
3081 */
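    /* Each protected-mode IDT entry is 8 bytes; the limit must cover the entry's last byte (e.g. vector 0x0e needs bytes 0x70 thru 0x77). */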
3082 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3083 {
3084 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3085 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3086 }
3087 X86DESC Idte;
3088 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3089 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3091 {
3092 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3093 return rcStrict;
3094 }
3095 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3096 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3097 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3098 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3099
3100 /*
3101 * Check the descriptor type, DPL and such.
3102 * ASSUMES this is done in the same order as described for call-gate calls.
3103 */
3104 if (Idte.Gate.u1DescType)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 bool fTaskGate = false;
3110 uint8_t f32BitGate = true;
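    /* Note: used below as a shift count (0 or 1) when sizing the stack frame, which is presumably why it isn't declared as a bool. */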
3111 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3112 switch (Idte.Gate.u4Type)
3113 {
3114 case X86_SEL_TYPE_SYS_UNDEFINED:
3115 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3116 case X86_SEL_TYPE_SYS_LDT:
3117 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3118 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3119 case X86_SEL_TYPE_SYS_UNDEFINED2:
3120 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3121 case X86_SEL_TYPE_SYS_UNDEFINED3:
3122 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3123 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3124 case X86_SEL_TYPE_SYS_UNDEFINED4:
3125 {
3126 /** @todo check what actually happens when the type is wrong...
3127 * esp. call gates. */
3128 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131
3132 case X86_SEL_TYPE_SYS_286_INT_GATE:
3133 f32BitGate = false;
3134 RT_FALL_THRU();
3135 case X86_SEL_TYPE_SYS_386_INT_GATE:
3136 fEflToClear |= X86_EFL_IF;
3137 break;
3138
3139 case X86_SEL_TYPE_SYS_TASK_GATE:
3140 fTaskGate = true;
3141#ifndef IEM_IMPLEMENTS_TASKSWITCH
3142 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3143#endif
3144 break;
3145
3146 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3147             f32BitGate = false;
                 RT_FALL_THRU();
3148 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3149 break;
3150
3151 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3152 }
3153
3154 /* Check DPL against CPL if applicable. */
3155 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3156 {
3157 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3158 {
3159 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162 }
3163
3164 /* Is it there? */
3165 if (!Idte.Gate.u1Present)
3166 {
3167 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3168 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3169 }
3170
3171 /* Is it a task-gate? */
3172 if (fTaskGate)
3173 {
3174 /*
3175 * Construct the error code masks based on what caused this task switch.
3176 * See Intel Instruction reference for INT.
3177 */
3178 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3179 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3180 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3181 RTSEL SelTSS = Idte.Gate.u16Sel;
3182
3183 /*
3184 * Fetch the TSS descriptor in the GDT.
3185 */
3186 IEMSELDESC DescTSS;
3187 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3188 if (rcStrict != VINF_SUCCESS)
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3191 VBOXSTRICTRC_VAL(rcStrict)));
3192 return rcStrict;
3193 }
3194
3195 /* The TSS descriptor must be a system segment and be available (not busy). */
3196 if ( DescTSS.Legacy.Gen.u1DescType
3197 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3198 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3199 {
3200 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3201 u8Vector, SelTSS, DescTSS.Legacy.au64));
3202 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3203 }
3204
3205 /* The TSS must be present. */
3206 if (!DescTSS.Legacy.Gen.u1Present)
3207 {
3208 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3209 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3210 }
3211
3212 /* Do the actual task switch. */
3213 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3214 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3215 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3216 }
3217
3218 /* A null CS is bad. */
3219 RTSEL NewCS = Idte.Gate.u16Sel;
3220 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3223 return iemRaiseGeneralProtectionFault0(pVCpu);
3224 }
3225
3226 /* Fetch the descriptor for the new CS. */
3227 IEMSELDESC DescCS;
3228 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3229 if (rcStrict != VINF_SUCCESS)
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3232 return rcStrict;
3233 }
3234
3235 /* Must be a code segment. */
3236 if (!DescCS.Legacy.Gen.u1DescType)
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3242 {
3243 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3244 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3245 }
3246
3247 /* Don't allow lowering the privilege level. */
3248 /** @todo Does the lowering of privileges apply to software interrupts
3249 * only? This has bearings on the more-privileged or
3250 * same-privilege stack behavior further down. A testcase would
3251 * be nice. */
3252 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3253 {
3254 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3255 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3257 }
3258
3259 /* Make sure the selector is present. */
3260 if (!DescCS.Legacy.Gen.u1Present)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3263 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3264 }
3265
3266#ifdef LOG_ENABLED
3267 /* If software interrupt, try to decode it if logging is enabled and such. */
3268 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3269 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3270 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3271#endif
3272
3273 /* Check the new EIP against the new CS limit. */
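    /* A 286 gate only carries a 16-bit offset, while a 386 gate combines the low and high offset words. */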
3274 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3275 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3276 ? Idte.Gate.u16OffsetLow
3277 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3278 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3279 if (uNewEip > cbLimitCS)
3280 {
3281 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3282 u8Vector, uNewEip, cbLimitCS, NewCS));
3283 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3284 }
3285 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3286
3287 /* Calc the flag image to push. */
3288 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3289 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3290 fEfl &= ~X86_EFL_RF;
3291 else
3292 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3293
3294 /* From V8086 mode only go to CPL 0. */
3295 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3296 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
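    /* A conforming code segment keeps the current CPL; a non-conforming one runs at its own DPL. */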
3297 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3298 {
3299 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3300 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3301 }
3302
3303 /*
3304 * If the privilege level changes, we need to get a new stack from the TSS.
3305 * This in turn means validating the new SS and ESP...
3306 */
3307 if (uNewCpl != IEM_GET_CPL(pVCpu))
3308 {
3309 RTSEL NewSS;
3310 uint32_t uNewEsp;
3311 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3312 if (rcStrict != VINF_SUCCESS)
3313 return rcStrict;
3314
3315 IEMSELDESC DescSS;
3316 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3317 if (rcStrict != VINF_SUCCESS)
3318 return rcStrict;
3319 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3320 if (!DescSS.Legacy.Gen.u1DefBig)
3321 {
3322 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3323 uNewEsp = (uint16_t)uNewEsp;
3324 }
3325
3326 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3327
3328 /* Check that there is sufficient space for the stack frame. */
3329 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3330 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3331 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3332 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
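        /* A sketch of what cbStackFrame covers (each entry is 2 bytes for a 16-bit gate, 4 bytes for a 32-bit gate):
         *      SS:ESP, EFLAGS, CS:EIP (+ optional error code)        = 5 or 6 entries,
         *      plus GS, FS, DS and ES when coming from V8086 mode    = 9 or 10 entries. */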
3333
3334 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3335 {
3336 if ( uNewEsp - 1 > cbLimitSS
3337 || uNewEsp < cbStackFrame)
3338 {
3339 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3340 u8Vector, NewSS, uNewEsp, cbStackFrame));
3341 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3342 }
3343 }
3344 else
3345 {
3346 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3347 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3348 {
3349 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3350 u8Vector, NewSS, uNewEsp, cbStackFrame));
3351 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3352 }
3353 }
3354
3355 /*
3356 * Start making changes.
3357 */
3358
3359 /* Set the new CPL so that stack accesses use it. */
3360 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3361 IEM_SET_CPL(pVCpu, uNewCpl);
3362
3363 /* Create the stack frame. */
3364 RTPTRUNION uStackFrame;
3365 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3366 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3367 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3368 if (rcStrict != VINF_SUCCESS)
3369 return rcStrict;
3370 void * const pvStackFrame = uStackFrame.pv;
3371 if (f32BitGate)
3372 {
3373 if (fFlags & IEM_XCPT_FLAGS_ERR)
3374 *uStackFrame.pu32++ = uErr;
3375 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3376 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3377 uStackFrame.pu32[2] = fEfl;
3378 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3379 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3380 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3381 if (fEfl & X86_EFL_VM)
3382 {
3383 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3384 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3385 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3386 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3387 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3388 }
3389 }
3390 else
3391 {
3392 if (fFlags & IEM_XCPT_FLAGS_ERR)
3393 *uStackFrame.pu16++ = uErr;
3394 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3395 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3396 uStackFrame.pu16[2] = fEfl;
3397 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3398 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3399 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3400 if (fEfl & X86_EFL_VM)
3401 {
3402 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3403 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3404 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3405 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3406 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3407 }
3408 }
3409 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3410 if (rcStrict != VINF_SUCCESS)
3411 return rcStrict;
3412
3413 /* Mark the selectors 'accessed' (hope this is the correct time). */
3414 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3415 * after pushing the stack frame? (Write protect the gdt + stack to
3416 * find out.) */
3417 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3418 {
3419 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
3422 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3423 }
3424
3425 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3426 {
3427 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3431 }
3432
3433 /*
3434 * Start committing the register changes (joins with the DPL=CPL branch).
3435 */
3436 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3437 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3438 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3439 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3440 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3441 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3442 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3443 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3444 * SP is loaded).
3445 * Need to check the other combinations too:
3446 * - 16-bit TSS, 32-bit handler
3447 * - 32-bit TSS, 16-bit handler */
3448 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3449 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3450 else
3451 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3452
3453 if (fEfl & X86_EFL_VM)
3454 {
3455 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3456 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3457 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3458 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3459 }
3460 }
3461 /*
3462 * Same privilege, no stack change and smaller stack frame.
3463 */
3464 else
3465 {
3466 uint64_t uNewRsp;
3467 RTPTRUNION uStackFrame;
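        /* The same-privilege frame only holds (E)FLAGS, CS and (E)IP, plus an optional
           error code: 3 or 4 entries of 2 bytes each, doubled for a 32-bit gate. */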
3468 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3469 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3470 if (rcStrict != VINF_SUCCESS)
3471 return rcStrict;
3472 void * const pvStackFrame = uStackFrame.pv;
3473
3474 if (f32BitGate)
3475 {
3476 if (fFlags & IEM_XCPT_FLAGS_ERR)
3477 *uStackFrame.pu32++ = uErr;
3478 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3479 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3480 uStackFrame.pu32[2] = fEfl;
3481 }
3482 else
3483 {
3484 if (fFlags & IEM_XCPT_FLAGS_ERR)
3485 *uStackFrame.pu16++ = uErr;
3486 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3487 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3488 uStackFrame.pu16[2] = fEfl;
3489 }
3490 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493
3494 /* Mark the CS selector as 'accessed'. */
3495 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3496 {
3497 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3501 }
3502
3503 /*
3504 * Start committing the register changes (joins with the other branch).
3505 */
3506 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3507 }
3508
3509 /* ... register committing continues. */
3510 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3511 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3512 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3513 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3514 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3515 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3516
3517 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3518 fEfl &= ~fEflToClear;
3519 IEMMISC_SET_EFL(pVCpu, fEfl);
3520
3521 if (fFlags & IEM_XCPT_FLAGS_CR2)
3522 pVCpu->cpum.GstCtx.cr2 = uCr2;
3523
3524 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3525 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3526
3527 /* Make sure the execution flags are correct. */
3528 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3529 if (fExecNew != pVCpu->iem.s.fExec)
3530 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3531 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3532 pVCpu->iem.s.fExec = fExecNew;
3533 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3534
3535 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3536}
3537
3538
3539/**
3540 * Implements exceptions and interrupts for long mode.
3541 *
3542 * @returns VBox strict status code.
3543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3544 * @param cbInstr The number of bytes to offset rIP by in the return
3545 * address.
3546 * @param u8Vector The interrupt / exception vector number.
3547 * @param fFlags The flags.
3548 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3549 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3550 */
3551static VBOXSTRICTRC
3552iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3553 uint8_t cbInstr,
3554 uint8_t u8Vector,
3555 uint32_t fFlags,
3556 uint16_t uErr,
3557 uint64_t uCr2) RT_NOEXCEPT
3558{
3559 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3560
3561 /*
3562 * Read the IDT entry.
3563 */
3564 uint16_t offIdt = (uint16_t)u8Vector << 4;
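    /* Long-mode IDT entries are 16 bytes each, hence the shift by 4 (e.g. vector 0x0e lives at offset 0xe0). */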
3565 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3566 {
3567 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3568 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3569 }
3570 X86DESC64 Idte;
3571#ifdef _MSC_VER /* Shut up silly compiler warning. */
3572 Idte.au64[0] = 0;
3573 Idte.au64[1] = 0;
3574#endif
3575 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3576 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3577 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3578 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3579 {
3580 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3581 return rcStrict;
3582 }
3583 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3584 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3585 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3586
3587 /*
3588 * Check the descriptor type, DPL and such.
3589 * ASSUMES this is done in the same order as described for call-gate calls.
3590 */
3591 if (Idte.Gate.u1DescType)
3592 {
3593 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3594 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3595 }
3596 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3597 switch (Idte.Gate.u4Type)
3598 {
3599 case AMD64_SEL_TYPE_SYS_INT_GATE:
3600 fEflToClear |= X86_EFL_IF;
3601 break;
3602 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3603 break;
3604
3605 default:
3606 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3607 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3608 }
3609
3610 /* Check DPL against CPL if applicable. */
3611 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3612 {
3613 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 }
3619
3620 /* Is it there? */
3621 if (!Idte.Gate.u1Present)
3622 {
3623 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3624 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3625 }
3626
3627 /* A null CS is bad. */
3628 RTSEL NewCS = Idte.Gate.u16Sel;
3629 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3630 {
3631 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3632 return iemRaiseGeneralProtectionFault0(pVCpu);
3633 }
3634
3635 /* Fetch the descriptor for the new CS. */
3636 IEMSELDESC DescCS;
3637 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3638 if (rcStrict != VINF_SUCCESS)
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3641 return rcStrict;
3642 }
3643
3644 /* Must be a 64-bit code segment. */
3645 if (!DescCS.Long.Gen.u1DescType)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3648 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3649 }
3650 if ( !DescCS.Long.Gen.u1Long
3651 || DescCS.Long.Gen.u1DefBig
3652 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3653 {
3654 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3655 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3656 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3657 }
3658
3659 /* Don't allow lowering the privilege level. For non-conforming CS
3660 selectors, the CS.DPL sets the privilege level the trap/interrupt
3661 handler runs at. For conforming CS selectors, the CPL remains
3662 unchanged, but the CS.DPL must be <= CPL. */
3663 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3664 * when CPU in Ring-0. Result \#GP? */
3665 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3666 {
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3668 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3669 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3670 }
3671
3672
3673 /* Make sure the selector is present. */
3674 if (!DescCS.Legacy.Gen.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3677 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3678 }
3679
3680 /* Check that the new RIP is canonical. */
3681 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3682 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3683 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3684 if (!IEM_IS_CANONICAL(uNewRip))
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3687 return iemRaiseGeneralProtectionFault0(pVCpu);
3688 }
3689
3690 /*
3691 * If the privilege level changes or if the IST isn't zero, we need to get
3692 * a new stack from the TSS.
3693 */
3694 uint64_t uNewRsp;
3695 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3696 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3697 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3698 || Idte.Gate.u3IST != 0)
3699 {
3700 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3701 if (rcStrict != VINF_SUCCESS)
3702 return rcStrict;
3703 }
3704 else
3705 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3706 uNewRsp &= ~(uint64_t)0xf;
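    /* In 64-bit mode the CPU aligns the new stack pointer down to a 16-byte boundary before pushing the interrupt frame. */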
3707
3708 /*
3709 * Calc the flag image to push.
3710 */
3711 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3712 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3713 fEfl &= ~X86_EFL_RF;
3714 else
3715 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3716
3717 /*
3718 * Start making changes.
3719 */
3720 /* Set the new CPL so that stack accesses use it. */
3721 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3722 IEM_SET_CPL(pVCpu, uNewCpl);
3723/** @todo Setting CPL this early seems wrong as it would affect any errors we
3724 * raise while accessing the stack and (?) the GDT/LDT... */
3725
3726 /* Create the stack frame. */
3727 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
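    /* That is: SS, RSP, RFLAGS, CS and RIP are always pushed in 64-bit mode, plus an optional error code: 5 or 6 qwords (40 or 48 bytes). */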
3728 RTPTRUNION uStackFrame;
3729 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3730 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3731 if (rcStrict != VINF_SUCCESS)
3732 return rcStrict;
3733 void * const pvStackFrame = uStackFrame.pv;
3734
3735 if (fFlags & IEM_XCPT_FLAGS_ERR)
3736 *uStackFrame.pu64++ = uErr;
3737 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3738 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3739 uStackFrame.pu64[2] = fEfl;
3740 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3741 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3742 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3743 if (rcStrict != VINF_SUCCESS)
3744 return rcStrict;
3745
3746 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3747 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3748 * after pushing the stack frame? (Write protect the gdt + stack to
3749 * find out.) */
3750 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3751 {
3752 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3756 }
3757
3758 /*
3759 * Start committing the register changes.
3760 */
3761 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3762 * hidden registers when interrupting 32-bit or 16-bit code! */
3763 if (uNewCpl != uOldCpl)
3764 {
3765 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3766 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3767 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3768 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3769 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3770 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3771 }
3772 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3773 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3774 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3775 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3776 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3777 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3778 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3779 pVCpu->cpum.GstCtx.rip = uNewRip;
3780
3781 fEfl &= ~fEflToClear;
3782 IEMMISC_SET_EFL(pVCpu, fEfl);
3783
3784 if (fFlags & IEM_XCPT_FLAGS_CR2)
3785 pVCpu->cpum.GstCtx.cr2 = uCr2;
3786
3787 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3788 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3789
3790 iemRecalcExecModeAndCplFlags(pVCpu);
3791
3792 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3793}
3794
3795
3796/**
3797 * Implements exceptions and interrupts.
3798 *
3799 * All exceptions and interrupts go through this function!
3800 *
3801 * @returns VBox strict status code.
3802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3803 * @param cbInstr The number of bytes to offset rIP by in the return
3804 * address.
3805 * @param u8Vector The interrupt / exception vector number.
3806 * @param fFlags The flags.
3807 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3808 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3809 */
3810VBOXSTRICTRC
3811iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3812 uint8_t cbInstr,
3813 uint8_t u8Vector,
3814 uint32_t fFlags,
3815 uint16_t uErr,
3816 uint64_t uCr2) RT_NOEXCEPT
3817{
3818 /*
3819 * Get all the state that we might need here.
3820 */
3821 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3822 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3823
3824#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3825 /*
3826 * Flush prefetch buffer
3827 */
3828 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3829#endif
3830
3831 /*
3832 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3833 */
3834 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3835 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3836 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3837 | IEM_XCPT_FLAGS_BP_INSTR
3838 | IEM_XCPT_FLAGS_ICEBP_INSTR
3839 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3840 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3841 {
3842 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3843 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3844 u8Vector = X86_XCPT_GP;
3845 uErr = 0;
3846 }
3847#ifdef DBGFTRACE_ENABLED
3848 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3849 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3850 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3851#endif
3852
3853 /*
3854 * Evaluate whether NMI blocking should be in effect.
3855 * Normally, NMI blocking is in effect whenever we inject an NMI.
3856 */
3857 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3858 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3859
3860#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3861 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3862 {
3863 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3864 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3865 return rcStrict0;
3866
3867 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3868 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3869 {
3870 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3871 fBlockNmi = false;
3872 }
3873 }
3874#endif
3875
3876#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3877 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3878 {
3879 /*
3880 * If the event is being injected as part of VMRUN, it isn't subject to event
3881 * intercepts in the nested-guest. However, secondary exceptions that occur
3882 * during injection of any event -are- subject to exception intercepts.
3883 *
3884 * See AMD spec. 15.20 "Event Injection".
3885 */
3886 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3887 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3888 else
3889 {
3890 /*
3891 * Check and handle if the event being raised is intercepted.
3892 */
3893 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3894 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3895 return rcStrict0;
3896 }
3897 }
3898#endif
3899
3900 /*
3901 * Set NMI blocking if necessary.
3902 */
3903 if (fBlockNmi)
3904 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3905
3906 /*
3907 * Do recursion accounting.
3908 */
3909 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3910 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3911 if (pVCpu->iem.s.cXcptRecursions == 0)
3912 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3913 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3914 else
3915 {
3916 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3917 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3918 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3919
3920 if (pVCpu->iem.s.cXcptRecursions >= 4)
3921 {
3922#ifdef DEBUG_bird
3923 AssertFailed();
3924#endif
3925 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3926 }
3927
3928 /*
3929 * Evaluate the sequence of recurring events.
3930 */
3931 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3932 NULL /* pXcptRaiseInfo */);
3933 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3934 { /* likely */ }
3935 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3936 {
3937 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3938 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3939 u8Vector = X86_XCPT_DF;
3940 uErr = 0;
3941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3942 /* VMX nested-guest #DF intercept needs to be checked here. */
3943 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3944 {
3945 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3946 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3947 return rcStrict0;
3948 }
3949#endif
3950 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3951 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3952 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3953 }
3954 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3955 {
3956 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3957 return iemInitiateCpuShutdown(pVCpu);
3958 }
3959 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3960 {
3961 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3962 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3963 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3964 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3965 return VERR_EM_GUEST_CPU_HANG;
3966 }
3967 else
3968 {
3969 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3970 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3971 return VERR_IEM_IPE_9;
3972 }
3973
3974 /*
3975 * The 'EXT' bit is set when an exception occurs during delivery of an external
3976 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3977 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by software
3978 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3979 *
3980 * [1] - Intel spec. 6.13 "Error Code"
3981 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3982 * [3] - Intel Instruction reference for INT n.
3983 */
3984 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3985 && (fFlags & IEM_XCPT_FLAGS_ERR)
3986 && u8Vector != X86_XCPT_PF
3987 && u8Vector != X86_XCPT_DF)
3988 {
3989 uErr |= X86_TRAP_ERR_EXTERNAL;
3990 }
3991 }
3992
3993 pVCpu->iem.s.cXcptRecursions++;
3994 pVCpu->iem.s.uCurXcpt = u8Vector;
3995 pVCpu->iem.s.fCurXcpt = fFlags;
3996 pVCpu->iem.s.uCurXcptErr = uErr;
3997 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3998
3999 /*
4000 * Extensive logging.
4001 */
4002#if defined(LOG_ENABLED) && defined(IN_RING3)
4003 if (LogIs3Enabled())
4004 {
4005 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4006 PVM pVM = pVCpu->CTX_SUFF(pVM);
4007 char szRegs[4096];
4008 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4009 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4010 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4011 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4012 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4013 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4014 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4015 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4016 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4017 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4018 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4019 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4020 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4021 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4022 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4023 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4024 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4025 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4026 " efer=%016VR{efer}\n"
4027 " pat=%016VR{pat}\n"
4028 " sf_mask=%016VR{sf_mask}\n"
4029 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4030 " lstar=%016VR{lstar}\n"
4031 " star=%016VR{star} cstar=%016VR{cstar}\n"
4032 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4033 );
4034
4035 char szInstr[256];
4036 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4037 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4038 szInstr, sizeof(szInstr), NULL);
4039 Log3(("%s%s\n", szRegs, szInstr));
4040 }
4041#endif /* LOG_ENABLED */
4042
4043 /*
4044 * Stats.
4045 */
4046 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4047 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4048 else if (u8Vector <= X86_XCPT_LAST)
4049 {
4050 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4051 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4052 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4053 }
4054
4055 /*
4056 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4057 * to ensure that a stale TLB or paging cache entry will only cause one
4058 * spurious #PF.
4059 */
4060 if ( u8Vector == X86_XCPT_PF
4061 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4062 IEMTlbInvalidatePage(pVCpu, uCr2);
4063
4064 /*
4065 * Call the mode specific worker function.
4066 */
4067 VBOXSTRICTRC rcStrict;
4068 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4069 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4070 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4071 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4072 else
4073 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4074
4075 /* Flush the prefetch buffer. */
4076 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4077
4078 /*
4079 * Unwind.
4080 */
4081 pVCpu->iem.s.cXcptRecursions--;
4082 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4083 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4084 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4085 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4086 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4087 return rcStrict;
4088}
4089
4090#ifdef IEM_WITH_SETJMP
4091/**
4092 * See iemRaiseXcptOrInt. Will not return.
4093 */
4094DECL_NO_RETURN(void)
4095iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4096 uint8_t cbInstr,
4097 uint8_t u8Vector,
4098 uint32_t fFlags,
4099 uint16_t uErr,
4100 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4101{
4102 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4103 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4104}
4105#endif
4106
4107
4108/** \#DE - 00. */
4109VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4110{
4111 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4112}
4113
4114
4115/** \#DB - 01.
4116 * @note This automatically clears DR7.GD. */
4117VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4118{
4119 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4120 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4121 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4122}
4123
4124
4125/** \#BR - 05. */
4126VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4127{
4128 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4129}
4130
4131
4132/** \#UD - 06. */
4133VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4134{
4135 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4136}
4137
4138
4139/** \#NM - 07. */
4140VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4141{
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4143}
4144
4145
4146/** \#TS(err) - 0a. */
4147VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4148{
4149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4150}
4151
4152
4153/** \#TS(tr) - 0a. */
4154VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4155{
4156 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 pVCpu->cpum.GstCtx.tr.Sel, 0);
4158}
4159
4160
4161/** \#TS(0) - 0a. */
4162VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 0, 0);
4166}
4167
4168
4169/** \#TS(sel) - 0a. */
4170VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & X86_SEL_MASK_OFF_RPL, 0);
4174}
4175
4176
4177/** \#NP(err) - 0b. */
4178VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4181}
4182
4183
4184/** \#NP(sel) - 0b. */
4185VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4186{
4187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4188 uSel & ~X86_SEL_RPL, 0);
4189}
4190
4191
4192/** \#SS(seg) - 0c. */
4193VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4196 uSel & ~X86_SEL_RPL, 0);
4197}
4198
4199
4200/** \#SS(err) - 0c. */
4201VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4204}
4205
4206
4207/** \#GP(n) - 0d. */
4208VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4209{
4210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#GP(0) - 0d. */
4222DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#GP(sel) - 0d. */
4230VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4233 Sel & ~X86_SEL_RPL, 0);
4234}
4235
4236
4237/** \#GP(0) - 0d. */
4238VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4239{
4240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4241}
4242
4243
4244/** \#GP(sel) - 0d. */
4245VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4246{
4247 NOREF(iSegReg); NOREF(fAccess);
4248 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4249 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4250}
4251
4252#ifdef IEM_WITH_SETJMP
4253/** \#GP(sel) - 0d, longjmp. */
4254DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4255{
4256 NOREF(iSegReg); NOREF(fAccess);
4257 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4258 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4259}
4260#endif
4261
4262/** \#GP(sel) - 0d. */
4263VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4264{
4265 NOREF(Sel);
4266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4267}
4268
4269#ifdef IEM_WITH_SETJMP
4270/** \#GP(sel) - 0d, longjmp. */
4271DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4272{
4273 NOREF(Sel);
4274 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4275}
4276#endif
4277
4278
4279/** \#GP(sel) - 0d. */
4280VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4281{
4282 NOREF(iSegReg); NOREF(fAccess);
4283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4284}
4285
4286#ifdef IEM_WITH_SETJMP
4287/** \#GP(sel) - 0d, longjmp. */
4288DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4289{
4290 NOREF(iSegReg); NOREF(fAccess);
4291 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4292}
4293#endif
4294
4295
4296/** \#PF(n) - 0e. */
4297VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4298{
4299 uint16_t uErr;
4300 switch (rc)
4301 {
4302 case VERR_PAGE_NOT_PRESENT:
4303 case VERR_PAGE_TABLE_NOT_PRESENT:
4304 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4305 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4306 uErr = 0;
4307 break;
4308
4309 default:
4310 AssertMsgFailed(("%Rrc\n", rc));
4311 RT_FALL_THRU();
4312 case VERR_ACCESS_DENIED:
4313 uErr = X86_TRAP_PF_P;
4314 break;
4315
4316 /** @todo reserved */
4317 }
4318
4319 if (IEM_GET_CPL(pVCpu) == 3)
4320 uErr |= X86_TRAP_PF_US;
4321
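    /* The instruction-fetch (ID) bit is only reported when PAE paging and EFER.NXE are enabled (SMEP is not considered here). */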
4322 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4323 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4324 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4325 uErr |= X86_TRAP_PF_ID;
4326
4327#if 0 /* This is so much non-sense, really. Why was it done like that? */
4328 /* Note! RW access callers reporting a WRITE protection fault, will clear
4329 the READ flag before calling. So, read-modify-write accesses (RW)
4330 can safely be reported as READ faults. */
4331 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4332 uErr |= X86_TRAP_PF_RW;
4333#else
4334 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4335 {
4336 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4337 /// (regardless of outcome of the comparison in the latter case).
4338 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4339 uErr |= X86_TRAP_PF_RW;
4340 }
4341#endif
4342
4343 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4344 of the memory operand rather than at the start of it. (Not sure what
4345    happens if it crosses a page boundary.) The current heuristic for
4346 this is to report the #PF for the last byte if the access is more than
4347 64 bytes. This is probably not correct, but we can work that out later,
4348 main objective now is to get FXSAVE to work like for real hardware and
4349 make bs3-cpu-basic2 work. */
4350 if (cbAccess <= 64)
4351 { /* likely*/ }
4352 else
4353 GCPtrWhere += cbAccess - 1;
4354
4355 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4356 uErr, GCPtrWhere);
4357}
4358
4359#ifdef IEM_WITH_SETJMP
4360/** \#PF(n) - 0e, longjmp. */
4361DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4362 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4363{
4364 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4365}
4366#endif
4367
4368
4369/** \#MF(0) - 10. */
4370VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4371{
4372 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4373 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4374
4375 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4376 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4377 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4378}
4379
4380
4381/** \#AC(0) - 11. */
4382VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4383{
4384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4385}
4386
4387#ifdef IEM_WITH_SETJMP
4388/** \#AC(0) - 11, longjmp. */
4389DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4390{
4391 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4392}
4393#endif
4394
4395
4396/** \#XF(0)/\#XM(0) - 19. */
4397VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4398{
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4400}
4401
4402
4403/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4404IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4405{
4406 NOREF(cbInstr);
4407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4408}
4409
4410
4411/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4412IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4413{
4414 NOREF(cbInstr);
4415 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4416}
4417
4418
4419/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4420IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4421{
4422 NOREF(cbInstr);
4423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4424}
4425
4426
4427/** @} */
4428
4429/** @name Common opcode decoders.
4430 * @{
4431 */
4432//#include <iprt/mem.h>
4433
4434/**
4435 * Used to add extra details about a stub case.
4436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4437 */
4438void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4439{
4440#if defined(LOG_ENABLED) && defined(IN_RING3)
4441 PVM pVM = pVCpu->CTX_SUFF(pVM);
4442 char szRegs[4096];
4443 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4444 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4445 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4446 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4447 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4448 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4449 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4450 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4451 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4452 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4453 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4454 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4455 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4456 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4457 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4458 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4459 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4460 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4461 " efer=%016VR{efer}\n"
4462 " pat=%016VR{pat}\n"
4463 " sf_mask=%016VR{sf_mask}\n"
4464 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4465 " lstar=%016VR{lstar}\n"
4466 " star=%016VR{star} cstar=%016VR{cstar}\n"
4467 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4468 );
4469
4470 char szInstr[256];
4471 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4472 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4473 szInstr, sizeof(szInstr), NULL);
4474
4475 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4476#else
4477 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4478#endif
4479}
4480
4481/** @} */
4482
4483
4484
4485/** @name Register Access.
4486 * @{
4487 */
4488
4489/**
4490 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4491 *
4492 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4493 * segment limit.
4494 *
4495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4496 * @param cbInstr Instruction size.
4497 * @param offNextInstr The offset of the next instruction.
4498 * @param enmEffOpSize Effective operand size.
4499 */
4500VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4501 IEMMODE enmEffOpSize) RT_NOEXCEPT
4502{
4503 switch (enmEffOpSize)
4504 {
4505 case IEMMODE_16BIT:
4506 {
4507 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4508 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4509 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4510 pVCpu->cpum.GstCtx.rip = uNewIp;
4511 else
4512 return iemRaiseGeneralProtectionFault0(pVCpu);
4513 break;
4514 }
4515
4516 case IEMMODE_32BIT:
4517 {
4518 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4519 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4520
4521 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4522 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4523 pVCpu->cpum.GstCtx.rip = uNewEip;
4524 else
4525 return iemRaiseGeneralProtectionFault0(pVCpu);
4526 break;
4527 }
4528
4529 case IEMMODE_64BIT:
4530 {
4531 Assert(IEM_IS_64BIT_CODE(pVCpu));
4532
4533 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4534 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4535 pVCpu->cpum.GstCtx.rip = uNewRip;
4536 else
4537 return iemRaiseGeneralProtectionFault0(pVCpu);
4538 break;
4539 }
4540
4541 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4542 }
4543
4544#ifndef IEM_WITH_CODE_TLB
4545 /* Flush the prefetch buffer. */
4546 pVCpu->iem.s.cbOpcode = cbInstr;
4547#endif
4548
4549 /*
4550 * Clear RF and finish the instruction (maybe raise #DB).
4551 */
4552 return iemRegFinishClearingRF(pVCpu);
4553}
4554
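/*
 * A minimal standalone sketch of the 16-bit target calculation above
 * (illustrative only, not the IEM API): the sum of IP, the instruction length
 * and the sign-extended displacement is truncated to 16 bits, so it wraps
 * modulo 64K before the CS limit check is applied.
 */
#if 0 /* illustrative sketch, not built */
#include <stdint.h>

static uint16_t ExampleRel8JumpTarget16(uint16_t uIp, uint8_t cbInstr, int8_t offNextInstr)
{
    return (uint16_t)(uIp + cbInstr + (int16_t)offNextInstr);
}
/* ExampleRel8JumpTarget16(0xfffe, 2, 0x10) == 0x0010, i.e. the target wraps. */
#endif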
4555
4556/**
4557 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4558 *
4559 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4560 * segment limit.
4561 *
4562 * @returns Strict VBox status code.
4563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4564 * @param cbInstr Instruction size.
4565 * @param offNextInstr The offset of the next instruction.
4566 */
4567VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4568{
4569 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4570
4571 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4572 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4573 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4574 pVCpu->cpum.GstCtx.rip = uNewIp;
4575 else
4576 return iemRaiseGeneralProtectionFault0(pVCpu);
4577
4578#ifndef IEM_WITH_CODE_TLB
4579 /* Flush the prefetch buffer. */
4580 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4581#endif
4582
4583 /*
4584 * Clear RF and finish the instruction (maybe raise #DB).
4585 */
4586 return iemRegFinishClearingRF(pVCpu);
4587}
4588
4589
4590/**
4591 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4592 *
4593 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4594 * segment limit.
4595 *
4596 * @returns Strict VBox status code.
4597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4598 * @param cbInstr Instruction size.
4599 * @param offNextInstr The offset of the next instruction.
4600 * @param enmEffOpSize Effective operand size.
4601 */
4602VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4603 IEMMODE enmEffOpSize) RT_NOEXCEPT
4604{
4605 if (enmEffOpSize == IEMMODE_32BIT)
4606 {
4607 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4608
4609 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4610 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4611 pVCpu->cpum.GstCtx.rip = uNewEip;
4612 else
4613 return iemRaiseGeneralProtectionFault0(pVCpu);
4614 }
4615 else
4616 {
4617 Assert(enmEffOpSize == IEMMODE_64BIT);
4618
4619 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4620 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4621 pVCpu->cpum.GstCtx.rip = uNewRip;
4622 else
4623 return iemRaiseGeneralProtectionFault0(pVCpu);
4624 }
4625
4626#ifndef IEM_WITH_CODE_TLB
4627 /* Flush the prefetch buffer. */
4628 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4629#endif
4630
4631 /*
4632 * Clear RF and finish the instruction (maybe raise #DB).
4633 */
4634 return iemRegFinishClearingRF(pVCpu);
4635}
4636
4637
4638/**
4639 * Performs a near jump to the specified address.
4640 *
4641 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param uNewIp The new IP value.
4645 */
4646VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4647{
4648 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4649 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4650 pVCpu->cpum.GstCtx.rip = uNewIp;
4651 else
4652 return iemRaiseGeneralProtectionFault0(pVCpu);
4653 /** @todo Test 16-bit jump in 64-bit mode. */
4654
4655#ifndef IEM_WITH_CODE_TLB
4656 /* Flush the prefetch buffer. */
4657 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4658#endif
4659
4660 /*
4661 * Clear RF and finish the instruction (maybe raise #DB).
4662 */
4663 return iemRegFinishClearingRF(pVCpu);
4664}
4665
4666
4667/**
4668 * Performs a near jump to the specified address.
4669 *
4670 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4671 *
4672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4673 * @param uNewEip The new EIP value.
4674 */
4675VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4676{
4677 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4678 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4679
4680 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4681 pVCpu->cpum.GstCtx.rip = uNewEip;
4682 else
4683 return iemRaiseGeneralProtectionFault0(pVCpu);
4684
4685#ifndef IEM_WITH_CODE_TLB
4686 /* Flush the prefetch buffer. */
4687 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4688#endif
4689
4690 /*
4691 * Clear RF and finish the instruction (maybe raise #DB).
4692 */
4693 return iemRegFinishClearingRF(pVCpu);
4694}
4695
4696
4697/**
4698 * Performs a near jump to the specified address.
4699 *
4700 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4701 * segment limit.
4702 *
4703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4704 * @param uNewRip The new RIP value.
4705 */
4706VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4707{
4708 Assert(IEM_IS_64BIT_CODE(pVCpu));
4709
4710 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4711 pVCpu->cpum.GstCtx.rip = uNewRip;
4712 else
4713 return iemRaiseGeneralProtectionFault0(pVCpu);
4714
4715#ifndef IEM_WITH_CODE_TLB
4716 /* Flush the prefetch buffer. */
4717 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4718#endif
4719
4720 /*
4721 * Clear RF and finish the instruction (maybe raise #DB).
4722 */
4723 return iemRegFinishClearingRF(pVCpu);
4724}
4725
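/*
 * A minimal standalone sketch of the canonical-address test used above
 * (illustrative only; not the actual IEM_IS_CANONICAL macro): a 64-bit linear
 * address is canonical when bits 63..47 are all copies of bit 47.
 */
#if 0 /* illustrative sketch, not built */
#include <stdbool.h>
#include <stdint.h>

static bool ExampleIsCanonical(uint64_t uAddr)
{
    /* Sign-extend from bit 47 and compare with the original value. */
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}
/* ExampleIsCanonical(UINT64_C(0x00007fffffffffff)) == true
   ExampleIsCanonical(UINT64_C(0x0000800000000000)) == false */
#endif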
4726/** @} */
4727
4728
4729/** @name FPU access and helpers.
4730 *
4731 * @{
4732 */
4733
4734/**
4735 * Updates the x87.DS and FPUDP registers.
4736 *
4737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4738 * @param pFpuCtx The FPU context.
4739 * @param iEffSeg The effective segment register.
4740 * @param GCPtrEff The effective address relative to @a iEffSeg.
4741 */
4742DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4743{
4744 RTSEL sel;
4745 switch (iEffSeg)
4746 {
4747 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4748 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4749 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4750 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4751 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4752 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4753 default:
4754 AssertMsgFailed(("%d\n", iEffSeg));
4755 sel = pVCpu->cpum.GstCtx.ds.Sel;
4756 }
4757 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4758 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4759 {
4760 pFpuCtx->DS = 0;
4761 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4762 }
4763 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4764 {
4765 pFpuCtx->DS = sel;
4766 pFpuCtx->FPUDP = GCPtrEff;
4767 }
4768 else
4769 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4770}
4771
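/*
 * A minimal standalone sketch of the real/V86-mode FPUDP encoding above
 * (illustrative only, not the IEM API): the DS field is zeroed and FPUDP holds
 * the linear address, i.e. selector * 16 plus the effective offset.
 */
#if 0 /* illustrative sketch, not built */
#include <stdint.h>

static uint32_t ExampleRealModeFpuDp(uint16_t sel, uint32_t offEff)
{
    return offEff + ((uint32_t)sel << 4);
}
/* ExampleRealModeFpuDp(0x1234, 0x0010) == 0x12350 */
#endif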
4772
4773/**
4774 * Rotates the stack registers in the push direction.
4775 *
4776 * @param pFpuCtx The FPU context.
4777 * @remarks This is a complete waste of time, but fxsave stores the registers in
4778 * stack order.
4779 */
4780DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4781{
4782 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4783 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4784 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4785 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4786 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4787 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4788 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4790 pFpuCtx->aRegs[0].r80 = r80Tmp;
4791}
4792
4793
4794/**
4795 * Rotates the stack registers in the pop direction.
4796 *
4797 * @param pFpuCtx The FPU context.
4798 * @remarks This is a complete waste of time, but fxsave stores the registers in
4799 * stack order.
4800 */
4801DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4802{
4803 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4804 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4805 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4806 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4807 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4808 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4809 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4810 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4811 pFpuCtx->aRegs[7].r80 = r80Tmp;
4812}
4813
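/*
 * A minimal standalone sketch of the ST(i)-to-physical-register mapping that
 * makes the rotations above necessary (illustrative only, not the IEM API):
 * aRegs[] is kept in ST(0)..ST(7) order, i.e. relative to TOP, and the
 * physical x87 register backing ST(i) is simply (TOP + i) mod 8.
 */
#if 0 /* illustrative sketch, not built */
static unsigned ExamplePhysRegFromStReg(unsigned iTop, unsigned iStReg)
{
    return (iTop + iStReg) & 7;     /* e.g. TOP=6, ST(3) -> physical register 1 */
}
#endif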
4814
4815/**
4816 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4817 * exception prevents it.
4818 *
4819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4820 * @param pResult The FPU operation result to push.
4821 * @param pFpuCtx The FPU context.
4822 */
4823static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4824{
4825 /* Update FSW and bail if there are pending exceptions afterwards. */
4826 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4827 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4828 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4829 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4830 {
4831 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4832 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4834 pFpuCtx->FSW = fFsw;
4835 return;
4836 }
4837
4838 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4839 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4840 {
4841 /* All is fine, push the actual value. */
4842 pFpuCtx->FTW |= RT_BIT(iNewTop);
4843 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4844 }
4845 else if (pFpuCtx->FCW & X86_FCW_IM)
4846 {
4847 /* Masked stack overflow, push QNaN. */
4848 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4849 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4850 }
4851 else
4852 {
4853 /* Raise stack overflow, don't push anything. */
4854 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4855 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4856 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4858 return;
4859 }
4860
4861 fFsw &= ~X86_FSW_TOP_MASK;
4862 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4863 pFpuCtx->FSW = fFsw;
4864
4865 iemFpuRotateStackPush(pFpuCtx);
4866 RT_NOREF(pVCpu);
4867}
4868
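/*
 * A minimal standalone sketch of the TOP/FTW arithmetic used by the push above
 * (illustrative only; the EX_* constants mirror the architectural FSW layout
 * with TOP in bits 11..13): a push decrements TOP modulo 8, expressed as
 * (TOP + 7) & 7, and the target slot is only free if its FTW bit is clear.
 */
#if 0 /* illustrative sketch, not built */
#include <stdbool.h>
#include <stdint.h>

#define EX_FSW_TOP_SHIFT    11
#define EX_FSW_TOP_SMASK    UINT16_C(0x7)

static uint16_t ExampleTopAfterPush(uint16_t fFsw)
{
    return (uint16_t)((((fFsw >> EX_FSW_TOP_SHIFT) & EX_FSW_TOP_SMASK) + 7) & EX_FSW_TOP_SMASK);
}

static bool ExampleIsSlotFree(uint16_t fFtw, uint16_t iNewTop)
{
    return !(fFtw & (uint16_t)(1u << iNewTop));     /* clear FTW bit = empty register */
}
/* TOP=0 pushes into slot 7; TOP=3 pushes into slot 2. */
#endif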
4869
4870/**
4871 * Stores a result in a FPU register and updates the FSW and FTW.
4872 *
4873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4874 * @param pFpuCtx The FPU context.
4875 * @param pResult The result to store.
4876 * @param iStReg Which FPU register to store it in.
4877 */
4878static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4879{
4880 Assert(iStReg < 8);
4881 uint16_t fNewFsw = pFpuCtx->FSW;
4882 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4883 fNewFsw &= ~X86_FSW_C_MASK;
4884 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4885 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4886 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4888 pFpuCtx->FSW = fNewFsw;
4889 pFpuCtx->FTW |= RT_BIT(iReg);
4890 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4891 RT_NOREF(pVCpu);
4892}
4893
4894
4895/**
4896 * Only updates the FPU status word (FSW) with the result of the current
4897 * instruction.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param pFpuCtx The FPU context.
4901 * @param u16FSW The FSW output of the current instruction.
4902 */
4903static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4904{
4905 uint16_t fNewFsw = pFpuCtx->FSW;
4906 fNewFsw &= ~X86_FSW_C_MASK;
4907 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4908 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4909 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4910 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4911 pFpuCtx->FSW = fNewFsw;
4912 RT_NOREF(pVCpu);
4913}
4914
4915
4916/**
4917 * Pops one item off the FPU stack if no pending exception prevents it.
4918 *
4919 * @param pFpuCtx The FPU context.
4920 */
4921static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4922{
4923 /* Check pending exceptions. */
4924 uint16_t uFSW = pFpuCtx->FSW;
4925 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4926 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4927 return;
4928
4929 /* TOP--. */
4930 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4931 uFSW &= ~X86_FSW_TOP_MASK;
4932 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4933 pFpuCtx->FSW = uFSW;
4934
4935 /* Mark the previous ST0 as empty. */
4936 iOldTop >>= X86_FSW_TOP_SHIFT;
4937 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4938
4939 /* Rotate the registers. */
4940 iemFpuRotateStackPop(pFpuCtx);
4941}
4942
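/*
 * A minimal standalone sketch of the in-place TOP increment used by the pop
 * above (illustrative only; the EX_* constants mirror the architectural FSW
 * layout with TOP in bits 11..13): adding 9 to the 3-bit TOP field and masking
 * is the same as adding 1 modulo 8, without extracting the field first.
 */
#if 0 /* illustrative sketch, not built */
#include <stdint.h>

#define EX_FSW_TOP_SHIFT    11
#define EX_FSW_TOP_MASK     UINT16_C(0x3800)

static uint16_t ExampleFswAfterPop(uint16_t fFsw)
{
    uint16_t const iOldTop = fFsw & EX_FSW_TOP_MASK;
    fFsw &= ~EX_FSW_TOP_MASK;
    fFsw |= (uint16_t)((iOldTop + (UINT16_C(9) << EX_FSW_TOP_SHIFT)) & EX_FSW_TOP_MASK);
    return fFsw;
}
/* TOP=7 becomes TOP=0; TOP=2 becomes TOP=3. */
#endif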
4943
4944/**
4945 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The FPU operation result to push.
4949 * @param uFpuOpcode The FPU opcode value.
4950 */
4951void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4955 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4956}
4957
4958
4959/**
4960 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4961 * and sets FPUDP and FPUDS.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to push.
4965 * @param iEffSeg The effective segment register.
4966 * @param GCPtrEff The effective address relative to @a iEffSeg.
4967 * @param uFpuOpcode The FPU opcode value.
4968 */
4969void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4970 uint16_t uFpuOpcode) RT_NOEXCEPT
4971{
4972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4973 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4974 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4975 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4976}
4977
4978
4979/**
4980 * Replace ST0 with the first value and push the second onto the FPU stack,
4981 * unless a pending exception prevents it.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param pResult The FPU operation result to store and push.
4985 * @param uFpuOpcode The FPU opcode value.
4986 */
4987void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4988{
4989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4991
4992 /* Update FSW and bail if there are pending exceptions afterwards. */
4993 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4994 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4995 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4996 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4997 {
4998 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4999 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5001 pFpuCtx->FSW = fFsw;
5002 return;
5003 }
5004
5005 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5006 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5007 {
5008 /* All is fine, push the actual value. */
5009 pFpuCtx->FTW |= RT_BIT(iNewTop);
5010 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5011 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5012 }
5013 else if (pFpuCtx->FCW & X86_FCW_IM)
5014 {
5015 /* Masked stack overflow, push QNaN. */
5016 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5019 }
5020 else
5021 {
5022 /* Raise stack overflow, don't push anything. */
5023 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5024 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5025 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5027 return;
5028 }
5029
5030 fFsw &= ~X86_FSW_TOP_MASK;
5031 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5032 pFpuCtx->FSW = fFsw;
5033
5034 iemFpuRotateStackPush(pFpuCtx);
5035}
5036
5037
5038/**
5039 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5040 * FOP.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The result to store.
5044 * @param iStReg Which FPU register to store it in.
5045 * @param uFpuOpcode The FPU opcode value.
5046 */
5047void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5051 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5052}
5053
5054
5055/**
5056 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5057 * FOP, and then pops the stack.
5058 *
5059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5060 * @param pResult The result to store.
5061 * @param iStReg Which FPU register to store it in.
5062 * @param uFpuOpcode The FPU opcode value.
5063 */
5064void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5065{
5066 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5067 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5068 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5069 iemFpuMaybePopOne(pFpuCtx);
5070}
5071
5072
5073/**
5074 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5075 * FPUDP, and FPUDS.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param pResult The result to store.
5079 * @param iStReg Which FPU register to store it in.
5080 * @param iEffSeg The effective memory operand selector register.
5081 * @param GCPtrEff The effective memory operand offset.
5082 * @param uFpuOpcode The FPU opcode value.
5083 */
5084void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5085 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091}
5092
5093
5094/**
5095 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5096 * FPUDP, and FPUDS, and then pops the stack.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param pResult The result to store.
5100 * @param iStReg Which FPU register to store it in.
5101 * @param iEffSeg The effective memory operand selector register.
5102 * @param GCPtrEff The effective memory operand offset.
5103 * @param uFpuOpcode The FPU opcode value.
5104 */
5105void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5106 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5110 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5111 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5112 iemFpuMaybePopOne(pFpuCtx);
5113}
5114
5115
5116/**
5117 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5118 *
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126}
5127
5128
5129/**
5130 * Updates the FSW, FOP, FPUIP, and FPUCS.
5131 *
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param u16FSW The FSW from the current instruction.
5134 * @param uFpuOpcode The FPU opcode value.
5135 */
5136void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5137{
5138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5139 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5140 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5141}
5142
5143
5144/**
5145 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param u16FSW The FSW from the current instruction.
5149 * @param uFpuOpcode The FPU opcode value.
5150 */
5151void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5152{
5153 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5156 iemFpuMaybePopOne(pFpuCtx);
5157}
5158
5159
5160/**
5161 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param u16FSW The FSW from the current instruction.
5165 * @param iEffSeg The effective memory operand selector register.
5166 * @param GCPtrEff The effective memory operand offset.
5167 * @param uFpuOpcode The FPU opcode value.
5168 */
5169void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5170{
5171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5172 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5173 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5174 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5175}
5176
5177
5178/**
5179 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5180 *
5181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5182 * @param u16FSW The FSW from the current instruction.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190 iemFpuMaybePopOne(pFpuCtx);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195/**
5196 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5197 *
5198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5199 * @param u16FSW The FSW from the current instruction.
5200 * @param iEffSeg The effective memory operand selector register.
5201 * @param GCPtrEff The effective memory operand offset.
5202 * @param uFpuOpcode The FPU opcode value.
5203 */
5204void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5205 uint16_t uFpuOpcode) RT_NOEXCEPT
5206{
5207 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5208 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5209 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5210 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5211 iemFpuMaybePopOne(pFpuCtx);
5212}
5213
5214
5215/**
5216 * Worker routine for raising an FPU stack underflow exception.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 * @param pFpuCtx The FPU context.
5220 * @param iStReg The stack register being accessed.
5221 */
5222static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5223{
5224 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5225 if (pFpuCtx->FCW & X86_FCW_IM)
5226 {
5227 /* Masked underflow. */
5228 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5229 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5231 if (iStReg != UINT8_MAX)
5232 {
5233 pFpuCtx->FTW |= RT_BIT(iReg);
5234 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5235 }
5236 }
5237 else
5238 {
5239 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5240 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5241 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5242 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5243 }
5244 RT_NOREF(pVCpu);
5245}
5246
5247
5248/**
5249 * Raises a FPU stack underflow exception.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param iStReg The destination register that should be loaded
5253 * with QNaN if \#IS is not masked. Specify
5254 * UINT8_MAX if none (like for fcom).
5255 * @param uFpuOpcode The FPU opcode value.
5256 */
5257void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5258{
5259 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5260 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5261 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5262}
5263
5264
5265void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5266{
5267 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5268 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5269 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5270 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5271}
5272
5273
5274void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5278 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5284 uint16_t uFpuOpcode) RT_NOEXCEPT
5285{
5286 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5287 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5288 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5289 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5290 iemFpuMaybePopOne(pFpuCtx);
5291}
5292
5293
5294void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5295{
5296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5297 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5298 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5299 iemFpuMaybePopOne(pFpuCtx);
5300 iemFpuMaybePopOne(pFpuCtx);
5301}
5302
5303
5304void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5305{
5306 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5307 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5308
5309 if (pFpuCtx->FCW & X86_FCW_IM)
5310 {
5311 /* Masked underflow - Push QNaN. */
5312 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5313 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5315 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5316 pFpuCtx->FTW |= RT_BIT(iNewTop);
5317 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5318 iemFpuRotateStackPush(pFpuCtx);
5319 }
5320 else
5321 {
5322 /* Exception pending - don't change TOP or the register stack. */
5323 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5324 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5325 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5327 }
5328}
5329
5330
5331void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5332{
5333 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335
5336 if (pFpuCtx->FCW & X86_FCW_IM)
5337 {
5338 /* Masked underflow - Push QNaN. */
5339 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5340 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5341 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5342 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5343 pFpuCtx->FTW |= RT_BIT(iNewTop);
5344 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5345 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5346 iemFpuRotateStackPush(pFpuCtx);
5347 }
5348 else
5349 {
5350 /* Exception pending - don't change TOP or the register stack. */
5351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5352 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5353 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5354 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5355 }
5356}
5357
5358
5359/**
5360 * Worker routine for raising an FPU stack overflow exception on a push.
5361 *
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param pFpuCtx The FPU context.
5364 */
5365static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5366{
5367 if (pFpuCtx->FCW & X86_FCW_IM)
5368 {
5369 /* Masked overflow. */
5370 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5371 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5372 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5373 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5374 pFpuCtx->FTW |= RT_BIT(iNewTop);
5375 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5376 iemFpuRotateStackPush(pFpuCtx);
5377 }
5378 else
5379 {
5380 /* Exception pending - don't change TOP or the register stack. */
5381 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5382 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5383 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5385 }
5386 RT_NOREF(pVCpu);
5387}
5388
5389
5390/**
5391 * Raises a FPU stack overflow exception on a push.
5392 *
5393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5394 * @param uFpuOpcode The FPU opcode value.
5395 */
5396void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5397{
5398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5399 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5400 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5401}
5402
5403
5404/**
5405 * Raises a FPU stack overflow exception on a push with a memory operand.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param iEffSeg The effective memory operand selector register.
5409 * @param GCPtrEff The effective memory operand offset.
5410 * @param uFpuOpcode The FPU opcode value.
5411 */
5412void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5413{
5414 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5415 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5416 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5417 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5418}
5419
5420/** @} */
5421
5422
5423/** @name SSE+AVX SIMD access and helpers.
5424 *
5425 * @{
5426 */
5427/**
5428 * Stores a result in a SIMD XMM register, updates the MXCSR.
5429 *
5430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5431 * @param pResult The result to store.
5432 * @param iXmmReg Which SIMD XMM register to store the result in.
5433 */
5434void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5438
5439 /* The result is only updated if there is no unmasked exception pending. */
5440 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5441 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5442 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5443}
5444
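/*
 * A minimal standalone sketch of the MXCSR check that gates the store above
 * (illustrative only; the EX_* constants mirror the architectural MXCSR layout
 * with exception flags in bits 0..5 and mask bits in bits 7..12): the result
 * is committed only if every raised exception flag has its mask bit set.
 */
#if 0 /* illustrative sketch, not built */
#include <stdbool.h>
#include <stdint.h>

#define EX_MXCSR_XCPT_FLAGS         UINT32_C(0x003f)
#define EX_MXCSR_XCPT_MASK          UINT32_C(0x1f80)
#define EX_MXCSR_XCPT_MASK_SHIFT    7

static bool ExampleSseResultIsCommitted(uint32_t fMxcsr)
{
    uint32_t const fUnmasked = ~((fMxcsr & EX_MXCSR_XCPT_MASK) >> EX_MXCSR_XCPT_MASK_SHIFT)
                             &  (fMxcsr & EX_MXCSR_XCPT_FLAGS);
    return fUnmasked == 0;      /* any unmasked pending exception suppresses the store */
}
#endif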
5445
5446/**
5447 * Updates the MXCSR.
5448 *
5449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5450 * @param fMxcsr The new MXCSR value.
5451 */
5452void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5456}
5457/** @} */
5458
5459
5460/** @name Memory access.
5461 *
5462 * @{
5463 */
5464
5465
5466/**
5467 * Updates the IEMCPU::cbWritten counter if applicable.
5468 *
5469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5470 * @param fAccess The access being accounted for.
5471 * @param cbMem The access size.
5472 */
5473DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5474{
5475 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5476 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5477 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5478}
5479
5480
5481/**
5482 * Applies the segment limit, base and attributes.
5483 *
5484 * This may raise a \#GP or \#SS.
5485 *
5486 * @returns VBox strict status code.
5487 *
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param fAccess The kind of access which is being performed.
5490 * @param iSegReg The index of the segment register to apply.
5491 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5492 * TSS, ++).
5493 * @param cbMem The access size.
5494 * @param pGCPtrMem Pointer to the guest memory address to apply
5495 * segmentation to. Input and output parameter.
5496 */
5497VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5498{
5499 if (iSegReg == UINT8_MAX)
5500 return VINF_SUCCESS;
5501
5502 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5503 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5504 switch (IEM_GET_CPU_MODE(pVCpu))
5505 {
5506 case IEMMODE_16BIT:
5507 case IEMMODE_32BIT:
5508 {
5509 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5510 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5511
5512 if ( pSel->Attr.n.u1Present
5513 && !pSel->Attr.n.u1Unusable)
5514 {
5515 Assert(pSel->Attr.n.u1DescType);
5516 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5517 {
5518 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5519 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5520 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5521
5522 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5523 {
5524 /** @todo CPL check. */
5525 }
5526
5527 /*
5528 * There are two kinds of data selectors, normal and expand down.
5529 */
5530 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5531 {
5532 if ( GCPtrFirst32 > pSel->u32Limit
5533 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5534 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5535 }
5536 else
5537 {
5538 /*
5539 * The upper boundary is defined by the B bit, not the G bit!
5540 */
5541 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5542 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5543 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5544 }
5545 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5546 }
5547 else
5548 {
5549 /*
5550 * A code selector can usually be used to read through; writing is
5551 * only permitted in real and V8086 mode.
5552 */
5553 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5554 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5555 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5556 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5557 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5558
5559 if ( GCPtrFirst32 > pSel->u32Limit
5560 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5561 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5562
5563 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5564 {
5565 /** @todo CPL check. */
5566 }
5567
5568 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5569 }
5570 }
5571 else
5572 return iemRaiseGeneralProtectionFault0(pVCpu);
5573 return VINF_SUCCESS;
5574 }
5575
5576 case IEMMODE_64BIT:
5577 {
5578 RTGCPTR GCPtrMem = *pGCPtrMem;
5579 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5580 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5581
5582 Assert(cbMem >= 1);
5583 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5584 return VINF_SUCCESS;
5585 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5586 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5587 return iemRaiseGeneralProtectionFault0(pVCpu);
5588 }
5589
5590 default:
5591 AssertFailedReturn(VERR_IEM_IPE_7);
5592 }
5593}
5594
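/*
 * A minimal standalone sketch of the 32-bit data-segment limit checks above
 * (illustrative only, not the IEM API): a normal segment allows offsets
 * 0..limit, while an expand-down segment allows limit+1 up to 0xffff or
 * 0xffffffff depending on the B bit.
 */
#if 0 /* illustrative sketch, not built */
#include <stdbool.h>
#include <stdint.h>

static bool ExampleDataSegLimitOk(bool fExpandDown, bool fBigDefault,
                                  uint32_t u32Limit, uint32_t offFirst, uint32_t offLast)
{
    if (!fExpandDown)
        return offFirst <= u32Limit && offLast <= u32Limit;
    uint32_t const offMax = fBigDefault ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > u32Limit && offLast <= offMax;
}
#endif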
5595
5596/**
5597 * Translates a virtual address to a physical address and checks if we
5598 * can access the page as specified.
5599 *
5600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5601 * @param GCPtrMem The virtual address.
5602 * @param cbAccess The access size, for raising \#PF correctly for
5603 * FXSAVE and such.
5604 * @param fAccess The intended access.
5605 * @param pGCPhysMem Where to return the physical address.
5606 */
5607VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5608 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5609{
5610 /** @todo Need a different PGM interface here. We're currently using
5611 * generic / REM interfaces. This won't cut it for R0. */
5612 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5613 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5614 * here. */
5615 PGMPTWALK Walk;
5616 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5617 if (RT_FAILURE(rc))
5618 {
5619 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5620 /** @todo Check unassigned memory in unpaged mode. */
5621 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5623 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5624 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5625#endif
5626 *pGCPhysMem = NIL_RTGCPHYS;
5627 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5628 }
5629
5630 /* If the page is writable and does not have the no-exec bit set, all
5631 access is allowed. Otherwise we'll have to check more carefully... */
5632 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5633 {
5634 /* Write to read only memory? */
5635 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5636 && !(Walk.fEffective & X86_PTE_RW)
5637 && ( ( IEM_GET_CPL(pVCpu) == 3
5638 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5639 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5640 {
5641 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5642 *pGCPhysMem = NIL_RTGCPHYS;
5643#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5644 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5645 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5646#endif
5647 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5648 }
5649
5650 /* Kernel memory accessed by userland? */
5651 if ( !(Walk.fEffective & X86_PTE_US)
5652 && IEM_GET_CPL(pVCpu) == 3
5653 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5654 {
5655 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5656 *pGCPhysMem = NIL_RTGCPHYS;
5657#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5658 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5659 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5660#endif
5661 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5662 }
5663
5664 /* Executing non-executable memory? */
5665 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5666 && (Walk.fEffective & X86_PTE_PAE_NX)
5667 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5668 {
5669 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5670 *pGCPhysMem = NIL_RTGCPHYS;
5671#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5674#endif
5675 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5676 VERR_ACCESS_DENIED);
5677 }
5678 }
5679
5680 /*
5681 * Set the dirty / access flags.
5682 * ASSUMES this is set when the address is translated rather than on commit...
5683 */
5684 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5685 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5686 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5687 {
5688 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5689 AssertRC(rc2);
5690 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5691 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5692 }
5693
5694 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5695 *pGCPhysMem = GCPhys;
5696 return VINF_SUCCESS;
5697}
5698
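/*
 * A minimal standalone sketch of the page permission checks above in decision
 * form (illustrative only, not the IEM API): writes to read-only pages fault
 * for user accesses and, with CR0.WP set, for supervisor accesses too; user
 * accesses to supervisor pages always fault; instruction fetches from NX pages
 * fault when EFER.NXE is enabled.
 */
#if 0 /* illustrative sketch, not built */
#include <stdbool.h>

static bool ExamplePageAccessFaults(bool fWrite, bool fExec, bool fUserAccess,
                                    bool fPteWritable, bool fPteUser, bool fPteNx,
                                    bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !fPteWritable && (fUserAccess || fCr0Wp))
        return true;                /* write to a read-only page */
    if (fUserAccess && !fPteUser)
        return true;                /* user access to a supervisor page */
    if (fExec && fPteNx && fEferNxe)
        return true;                /* instruction fetch from a no-execute page */
    return false;
}
#endif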
5699
5700/**
5701 * Looks up a memory mapping entry.
5702 *
5703 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5705 * @param pvMem The memory address.
5706 * @param fAccess The access type to look up.
5707 */
5708DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5709{
5710 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5711 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5712 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5713 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5714 return 0;
5715 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5716 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5717 return 1;
5718 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5719 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5720 return 2;
5721 return VERR_NOT_FOUND;
5722}
5723
5724
5725/**
5726 * Finds a free memmap entry when using iNextMapping doesn't work.
5727 *
5728 * @returns Memory mapping index, 1024 on failure.
5729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5730 */
5731static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5732{
5733 /*
5734 * The easy case.
5735 */
5736 if (pVCpu->iem.s.cActiveMappings == 0)
5737 {
5738 pVCpu->iem.s.iNextMapping = 1;
5739 return 0;
5740 }
5741
5742 /* There should be enough mappings for all instructions. */
5743 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5744
5745 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5746 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5747 return i;
5748
5749 AssertFailedReturn(1024);
5750}
5751
5752
5753/**
5754 * Commits a bounce buffer that needs writing back and unmaps it.
5755 *
5756 * @returns Strict VBox status code.
5757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5758 * @param iMemMap The index of the buffer to commit.
5759 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5760 * Always false in ring-3, obviously.
5761 */
5762static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5763{
5764 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5765 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5766#ifdef IN_RING3
5767 Assert(!fPostponeFail);
5768 RT_NOREF_PV(fPostponeFail);
5769#endif
5770
5771 /*
5772 * Do the writing.
5773 */
5774 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5775 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5776 {
5777 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5778 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5779 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5780 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5781 {
5782 /*
5783 * Carefully and efficiently dealing with access handler return
5784 * codes makes this a little bloated.
5785 */
5786 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5788 pbBuf,
5789 cbFirst,
5790 PGMACCESSORIGIN_IEM);
5791 if (rcStrict == VINF_SUCCESS)
5792 {
5793 if (cbSecond)
5794 {
5795 rcStrict = PGMPhysWrite(pVM,
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5797 pbBuf + cbFirst,
5798 cbSecond,
5799 PGMACCESSORIGIN_IEM);
5800 if (rcStrict == VINF_SUCCESS)
5801 { /* nothing */ }
5802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5803 {
5804 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5807 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5808 }
5809#ifndef IN_RING3
5810 else if (fPostponeFail)
5811 {
5812 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5815 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5816 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5817 return iemSetPassUpStatus(pVCpu, rcStrict);
5818 }
5819#endif
5820 else
5821 {
5822 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5825 return rcStrict;
5826 }
5827 }
5828 }
5829 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5830 {
5831 if (!cbSecond)
5832 {
5833 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5835 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5836 }
5837 else
5838 {
5839 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5841 pbBuf + cbFirst,
5842 cbSecond,
5843 PGMACCESSORIGIN_IEM);
5844 if (rcStrict2 == VINF_SUCCESS)
5845 {
5846 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5850 }
5851 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5852 {
5853 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5856 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5857 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5858 }
5859#ifndef IN_RING3
5860 else if (fPostponeFail)
5861 {
5862 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5866 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5867 return iemSetPassUpStatus(pVCpu, rcStrict);
5868 }
5869#endif
5870 else
5871 {
5872 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5875 return rcStrict2;
5876 }
5877 }
5878 }
5879#ifndef IN_RING3
5880 else if (fPostponeFail)
5881 {
5882 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5885 if (!cbSecond)
5886 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5887 else
5888 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5889 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5890 return iemSetPassUpStatus(pVCpu, rcStrict);
5891 }
5892#endif
5893 else
5894 {
5895 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5898 return rcStrict;
5899 }
5900 }
5901 else
5902 {
5903 /*
5904 * No access handlers, much simpler.
5905 */
5906 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5907 if (RT_SUCCESS(rc))
5908 {
5909 if (cbSecond)
5910 {
5911 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5912 if (RT_SUCCESS(rc))
5913 { /* likely */ }
5914 else
5915 {
5916 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5917 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5919 return rc;
5920 }
5921 }
5922 }
5923 else
5924 {
5925 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5928 return rc;
5929 }
5930 }
5931 }
5932
5933#if defined(IEM_LOG_MEMORY_WRITES)
5934 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5935 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5936 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5937 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5938 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5939 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5940
5941 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5942 g_cbIemWrote = cbWrote;
5943 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5944#endif
5945
5946 /*
5947 * Free the mapping entry.
5948 */
5949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5950 Assert(pVCpu->iem.s.cActiveMappings != 0);
5951 pVCpu->iem.s.cActiveMappings--;
5952 return VINF_SUCCESS;
5953}
5954
5955
5956/**
5957 * iemMemMap worker that deals with a request crossing pages.
5958 */
5959static VBOXSTRICTRC
5960iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5961{
5962 Assert(cbMem <= GUEST_PAGE_SIZE);
5963
5964 /*
5965 * Do the address translations.
5966 */
5967 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5968 RTGCPHYS GCPhysFirst;
5969 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5970 if (rcStrict != VINF_SUCCESS)
5971 return rcStrict;
5972 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5973
5974 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5975 RTGCPHYS GCPhysSecond;
5976 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5977 cbSecondPage, fAccess, &GCPhysSecond);
5978 if (rcStrict != VINF_SUCCESS)
5979 return rcStrict;
5980 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5981 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5982
5983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5984
5985 /*
5986 * Read in the current memory content if it's a read, execute or partial
5987 * write access.
5988 */
5989 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5990
5991 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5992 {
5993 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5994 {
5995 /*
5996 * Must carefully deal with access handler status codes here,
5997 * makes the code a bit bloated.
5998 */
5999 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6000 if (rcStrict == VINF_SUCCESS)
6001 {
6002 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6003 if (rcStrict == VINF_SUCCESS)
6004 { /*likely */ }
6005 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6006 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6007 else
6008 {
6009                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6010 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6011 return rcStrict;
6012 }
6013 }
6014 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6015 {
6016 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6017 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6018 {
6019 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6020 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6021 }
6022 else
6023 {
6024                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6025                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6026 return rcStrict2;
6027 }
6028 }
6029 else
6030 {
6031                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6032 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6033 return rcStrict;
6034 }
6035 }
6036 else
6037 {
6038 /*
6039             * No informational status codes here, much more straightforward.
6040 */
6041 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6042 if (RT_SUCCESS(rc))
6043 {
6044 Assert(rc == VINF_SUCCESS);
6045 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6046 if (RT_SUCCESS(rc))
6047 Assert(rc == VINF_SUCCESS);
6048 else
6049 {
6050                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6051 return rc;
6052 }
6053 }
6054 else
6055 {
6056                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6057 return rc;
6058 }
6059 }
6060 }
6061#ifdef VBOX_STRICT
6062 else
6063 memset(pbBuf, 0xcc, cbMem);
6064 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6065 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6066#endif
6067 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6068
6069 /*
6070 * Commit the bounce buffer entry.
6071 */
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6074 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6075 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6076 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6077 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6078 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6079 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6080 pVCpu->iem.s.cActiveMappings++;
6081
6082 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6083 *ppvMem = pbBuf;
6084 return VINF_SUCCESS;
6085}
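/*
 * Worked example for the page split done above (illustrative note, not part of
 * the original source): a 4 byte read at a linear address ending in 0xffe with
 * 4 KiB guest pages gives cbFirstPage = 0x1000 - 0xffe = 2 and
 * cbSecondPage = 4 - 2 = 2, so bytes 0..1 come from the first page and bytes
 * 2..3 from the second, landing contiguously in aBounceBuffers[iMemMap].ab[].
 */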
6086
6087
6088/**
6089 * iemMemMap worker that deals with iemMemPageMap failures.
6090 */
6091static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6092 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6093{
6094 /*
6095 * Filter out conditions we can handle and the ones which shouldn't happen.
6096 */
6097 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6098 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6099 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6100 {
6101 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6102 return rcMap;
6103 }
6104 pVCpu->iem.s.cPotentialExits++;
6105
6106 /*
6107 * Read in the current memory content if it's a read, execute or partial
6108 * write access.
6109 */
6110 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6111 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6112 {
6113 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6114 memset(pbBuf, 0xff, cbMem);
6115 else
6116 {
6117 int rc;
6118 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6119 {
6120 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6121 if (rcStrict == VINF_SUCCESS)
6122 { /* nothing */ }
6123 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6124 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6125 else
6126 {
6127 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6128 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6129 return rcStrict;
6130 }
6131 }
6132 else
6133 {
6134 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6135 if (RT_SUCCESS(rc))
6136 { /* likely */ }
6137 else
6138 {
6139                    Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6140 GCPhysFirst, rc));
6141 return rc;
6142 }
6143 }
6144 }
6145 }
6146#ifdef VBOX_STRICT
6147 else
6148 memset(pbBuf, 0xcc, cbMem);
6149#endif
6150#ifdef VBOX_STRICT
6151 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6152 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6153#endif
6154
6155 /*
6156 * Commit the bounce buffer entry.
6157 */
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6163 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6164 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6165 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6166 pVCpu->iem.s.cActiveMappings++;
6167
6168 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6169 *ppvMem = pbBuf;
6170 return VINF_SUCCESS;
6171}
6172
6173
6174
6175/**
6176 * Maps the specified guest memory for the given kind of access.
6177 *
6178 * This may be using bounce buffering of the memory if it's crossing a page
6179 * boundary or if there is an access handler installed for any of it. Because
6180 * of lock prefix guarantees, we're in for some extra clutter when this
6181 * happens.
6182 *
6183 * This may raise a \#GP, \#SS, \#PF or \#AC.
6184 *
6185 * @returns VBox strict status code.
6186 *
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param ppvMem Where to return the pointer to the mapped memory.
6189 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6190 * 8, 12, 16, 32 or 512. When used by string operations
6191 * it can be up to a page.
6192 * @param iSegReg The index of the segment register to use for this
6193 * access. The base and limits are checked. Use UINT8_MAX
6194 * to indicate that no segmentation is required (for IDT,
6195 * GDT and LDT accesses).
6196 * @param GCPtrMem The address of the guest memory.
6197 * @param fAccess How the memory is being accessed. The
6198 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6199 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6200 * when raising exceptions.
6201 * @param uAlignCtl Alignment control:
6202 * - Bits 15:0 is the alignment mask.
6203 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6204 * IEM_MEMMAP_F_ALIGN_SSE, and
6205 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6206 * Pass zero to skip alignment.
6207 */
6208VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6209 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6210{
6211 /*
6212 * Check the input and figure out which mapping entry to use.
6213 */
6214 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6215 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6216 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6217 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6218 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6219
6220 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6221 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6222 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6223 {
6224 iMemMap = iemMemMapFindFree(pVCpu);
6225 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6226 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6227 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6228 pVCpu->iem.s.aMemMappings[2].fAccess),
6229 VERR_IEM_IPE_9);
6230 }
6231
6232 /*
6233 * Map the memory, checking that we can actually access it. If something
6234 * slightly complicated happens, fall back on bounce buffering.
6235 */
6236 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6237 if (rcStrict == VINF_SUCCESS)
6238 { /* likely */ }
6239 else
6240 return rcStrict;
6241
6242 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6243 { /* likely */ }
6244 else
6245 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6246
6247 /*
6248 * Alignment check.
6249 */
6250 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6251 { /* likelyish */ }
6252 else
6253 {
6254 /* Misaligned access. */
6255 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6256 {
6257 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6258 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6259 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6260 {
6261 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6262
6263 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6264 return iemRaiseAlignmentCheckException(pVCpu);
6265 }
6266 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6267 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6268 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6269 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6270 * that's what FXSAVE does on a 10980xe. */
6271 && iemMemAreAlignmentChecksEnabled(pVCpu))
6272 return iemRaiseAlignmentCheckException(pVCpu);
6273 else
6274 return iemRaiseGeneralProtectionFault0(pVCpu);
6275 }
6276 }
6277
6278#ifdef IEM_WITH_DATA_TLB
6279 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6280
6281 /*
6282 * Get the TLB entry for this page.
6283 */
6284 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6285 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6286 if (pTlbe->uTag == uTag)
6287 {
6288# ifdef VBOX_WITH_STATISTICS
6289 pVCpu->iem.s.DataTlb.cTlbHits++;
6290# endif
6291 }
6292 else
6293 {
6294 pVCpu->iem.s.DataTlb.cTlbMisses++;
6295 PGMPTWALK Walk;
6296 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6297 if (RT_FAILURE(rc))
6298 {
6299 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6300# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6301 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6302 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6303# endif
6304 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6305 }
6306
6307 Assert(Walk.fSucceeded);
6308 pTlbe->uTag = uTag;
6309 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6310 pTlbe->GCPhys = Walk.GCPhys;
6311 pTlbe->pbMappingR3 = NULL;
6312 }
6313
6314 /*
6315 * Check TLB page table level access flags.
6316 */
6317 /* If the page is either supervisor only or non-writable, we need to do
6318 more careful access checks. */
6319 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6320 {
6321 /* Write to read only memory? */
6322 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6323 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6324 && ( ( IEM_GET_CPL(pVCpu) == 3
6325 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6326 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6327 {
6328 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6329# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6330 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6331 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6332# endif
6333 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6334 }
6335
6336 /* Kernel memory accessed by userland? */
6337 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6338 && IEM_GET_CPL(pVCpu) == 3
6339 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6340 {
6341 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6342# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6343 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6344 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6345# endif
6346 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6347 }
6348 }
6349
6350 /*
6351 * Set the dirty / access flags.
6352 * ASSUMES this is set when the address is translated rather than on commit...
6353 */
6354 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6355 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6356 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6357 {
6358 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6359 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6360 AssertRC(rc2);
6361 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6362 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6363 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6364 }
6365
6366 /*
6367 * Look up the physical page info if necessary.
6368 */
6369 uint8_t *pbMem = NULL;
6370 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6371# ifdef IN_RING3
6372 pbMem = pTlbe->pbMappingR3;
6373# else
6374 pbMem = NULL;
6375# endif
6376 else
6377 {
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6379 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6380 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6381 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6382 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6383 { /* likely */ }
6384 else
6385 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6386 pTlbe->pbMappingR3 = NULL;
6387 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6388 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6389 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6390 &pbMem, &pTlbe->fFlagsAndPhysRev);
6391 AssertRCReturn(rc, rc);
6392# ifdef IN_RING3
6393 pTlbe->pbMappingR3 = pbMem;
6394# endif
6395 }
6396
6397 /*
6398 * Check the physical page level access and mapping.
6399 */
6400 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6401 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6402 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6403 { /* probably likely */ }
6404 else
6405 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6406 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6407 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6408 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6409 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6410 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6411
6412 if (pbMem)
6413 {
6414 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6415 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6416 fAccess |= IEM_ACCESS_NOT_LOCKED;
6417 }
6418 else
6419 {
6420 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6421 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6422 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6423 if (rcStrict != VINF_SUCCESS)
6424 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6425 }
6426
6427 void * const pvMem = pbMem;
6428
6429 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6430 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6431 if (fAccess & IEM_ACCESS_TYPE_READ)
6432 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6433
6434#else /* !IEM_WITH_DATA_TLB */
6435
6436 RTGCPHYS GCPhysFirst;
6437 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6438 if (rcStrict != VINF_SUCCESS)
6439 return rcStrict;
6440
6441 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6442 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6443 if (fAccess & IEM_ACCESS_TYPE_READ)
6444 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6445
6446 void *pvMem;
6447 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6448 if (rcStrict != VINF_SUCCESS)
6449 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6450
6451#endif /* !IEM_WITH_DATA_TLB */
6452
6453 /*
6454 * Fill in the mapping table entry.
6455 */
6456 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6457 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6458 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6459 pVCpu->iem.s.cActiveMappings += 1;
6460
6461 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6462 *ppvMem = pvMem;
6463
6464 return VINF_SUCCESS;
6465}
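/*
 * Illustrative note on uAlignCtl (not part of the original source): the low 16
 * bits form the alignment mask and the upper bits carry the IEM_MEMMAP_F_ALIGN_XXX
 * flags.  For instance, the 16 byte aligned SSE qword fetch further down passes
 *      15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE
 * which, going by the alignment check above, raises \#GP(0) on a misaligned
 * address unless MXCSR.MM is set, in which case \#AC is raised instead when
 * alignment checking (CR0.AM, EFLAGS.AC, CPL 3) is enabled.
 */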
6466
6467
6468/**
6469 * Commits the guest memory if bounce buffered and unmaps it.
6470 *
6471 * @returns Strict VBox status code.
6472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6473 * @param pvMem The mapping.
6474 * @param fAccess The kind of access.
6475 */
6476VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6477{
6478 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6479 AssertReturn(iMemMap >= 0, iMemMap);
6480
6481 /* If it's bounce buffered, we may need to write back the buffer. */
6482 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6483 {
6484 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6485 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6486 }
6487 /* Otherwise unlock it. */
6488 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6489 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6490
6491 /* Free the entry. */
6492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6493 Assert(pVCpu->iem.s.cActiveMappings != 0);
6494 pVCpu->iem.s.cActiveMappings--;
6495 return VINF_SUCCESS;
6496}
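/*
 * Illustrative usage sketch (not part of the original source and excluded from
 * compilation): the typical iemMemMap / iemMemCommitAndUnmap pairing for a
 * 16-bit data write, mirroring the pattern the fetch helpers below use for
 * reads.  The helper name is hypothetical and IEM_ACCESS_DATA_W is assumed to
 * be the usual write+data access combination.
 */
#if 0 /* documentation-only sketch */
static VBOXSTRICTRC iemExampleStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    /* Map two writable bytes, requesting a natural (2 byte) alignment check. */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        /* Modify the mapped (possibly bounce buffered) memory... */
        *pu16Dst = u16Value;
        /* ...then commit and unmap, which writes back any bounce buffer. */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
#endif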
6497
6498#ifdef IEM_WITH_SETJMP
6499
6500/**
6501 * Maps the specified guest memory for the given kind of access, longjmp on
6502 * error.
6503 *
6504 * This may be using bounce buffering of the memory if it's crossing a page
6505 * boundary or if there is an access handler installed for any of it. Because
6506 * of lock prefix guarantees, we're in for some extra clutter when this
6507 * happens.
6508 *
6509 * This may raise a \#GP, \#SS, \#PF or \#AC.
6510 *
6511 * @returns Pointer to the mapped memory.
6512 *
6513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6514 * @param cbMem The number of bytes to map. This is usually 1,
6515 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6516 * string operations it can be up to a page.
6517 * @param iSegReg The index of the segment register to use for
6518 * this access. The base and limits are checked.
6519 * Use UINT8_MAX to indicate that no segmentation
6520 * is required (for IDT, GDT and LDT accesses).
6521 * @param GCPtrMem The address of the guest memory.
6522 * @param fAccess How the memory is being accessed. The
6523 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6524 * how to map the memory, while the
6525 * IEM_ACCESS_WHAT_XXX bit is used when raising
6526 * exceptions.
6527 * @param uAlignCtl Alignment control:
6528 * - Bits 15:0 is the alignment mask.
6529 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6530 * IEM_MEMMAP_F_ALIGN_SSE, and
6531 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6532 * Pass zero to skip alignment.
6533 */
6534void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6535 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6536{
6537 /*
6538 * Check the input, check segment access and adjust address
6539 * with segment base.
6540 */
6541 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6542 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6543 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6544
6545 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6546 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6547 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6548
6549 /*
6550 * Alignment check.
6551 */
6552 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6553 { /* likelyish */ }
6554 else
6555 {
6556 /* Misaligned access. */
6557 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6558 {
6559 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6560 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6561 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6562 {
6563 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6564
6565 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6566 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6567 }
6568 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6569 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6570 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6571 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6572 * that's what FXSAVE does on a 10980xe. */
6573 && iemMemAreAlignmentChecksEnabled(pVCpu))
6574 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6575 else
6576 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6577 }
6578 }
6579
6580 /*
6581 * Figure out which mapping entry to use.
6582 */
6583 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6584 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6585 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6586 {
6587 iMemMap = iemMemMapFindFree(pVCpu);
6588 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6589 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6590 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6591 pVCpu->iem.s.aMemMappings[2].fAccess),
6592 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6593 }
6594
6595 /*
6596 * Crossing a page boundary?
6597 */
6598 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6599 { /* No (likely). */ }
6600 else
6601 {
6602 void *pvMem;
6603 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6604 if (rcStrict == VINF_SUCCESS)
6605 return pvMem;
6606 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6607 }
6608
6609#ifdef IEM_WITH_DATA_TLB
6610 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6611
6612 /*
6613 * Get the TLB entry for this page.
6614 */
6615 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6616 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6617 if (pTlbe->uTag == uTag)
6618 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6619 else
6620 {
6621 pVCpu->iem.s.DataTlb.cTlbMisses++;
6622 PGMPTWALK Walk;
6623 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6624 if (RT_FAILURE(rc))
6625 {
6626 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6627# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6628 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6629 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6630# endif
6631 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6632 }
6633
6634 Assert(Walk.fSucceeded);
6635 pTlbe->uTag = uTag;
6636 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6637 pTlbe->GCPhys = Walk.GCPhys;
6638 pTlbe->pbMappingR3 = NULL;
6639 }
6640
6641 /*
6642 * Check the flags and physical revision.
6643 */
6644 /** @todo make the caller pass these in with fAccess. */
6645 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6646 ? IEMTLBE_F_PT_NO_USER : 0;
6647 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6648 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6649 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6650 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6651 ? IEMTLBE_F_PT_NO_WRITE : 0)
6652 : 0;
6653 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6654 uint8_t *pbMem = NULL;
6655 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6656 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6657# ifdef IN_RING3
6658 pbMem = pTlbe->pbMappingR3;
6659# else
6660 pbMem = NULL;
6661# endif
6662 else
6663 {
6664 /*
6665 * Okay, something isn't quite right or needs refreshing.
6666 */
6667 /* Write to read only memory? */
6668 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6669 {
6670 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6671# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6674# endif
6675 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6676 }
6677
6678 /* Kernel memory accessed by userland? */
6679 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6680 {
6681 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6682# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6683 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6684 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6685# endif
6686 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6687 }
6688
6689 /* Set the dirty / access flags.
6690 ASSUMES this is set when the address is translated rather than on commit... */
6691 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6692 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6693 {
6694 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6695 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6696 AssertRC(rc2);
6697 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6698 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6699 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6700 }
6701
6702 /*
6703 * Check if the physical page info needs updating.
6704 */
6705 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6706# ifdef IN_RING3
6707 pbMem = pTlbe->pbMappingR3;
6708# else
6709 pbMem = NULL;
6710# endif
6711 else
6712 {
6713 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6714 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6716 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6717 pTlbe->pbMappingR3 = NULL;
6718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6721 &pbMem, &pTlbe->fFlagsAndPhysRev);
6722 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6723# ifdef IN_RING3
6724 pTlbe->pbMappingR3 = pbMem;
6725# endif
6726 }
6727
6728 /*
6729 * Check the physical page level access and mapping.
6730 */
6731 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6732 { /* probably likely */ }
6733 else
6734 {
6735 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6736 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6737 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6738 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6739 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6740 if (rcStrict == VINF_SUCCESS)
6741 return pbMem;
6742 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6743 }
6744 }
6745 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6746
6747 if (pbMem)
6748 {
6749 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6750 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6751 fAccess |= IEM_ACCESS_NOT_LOCKED;
6752 }
6753 else
6754 {
6755 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6756 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6757 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6758 if (rcStrict == VINF_SUCCESS)
6759 return pbMem;
6760 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6761 }
6762
6763 void * const pvMem = pbMem;
6764
6765 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6766 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6767 if (fAccess & IEM_ACCESS_TYPE_READ)
6768 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6769
6770#else /* !IEM_WITH_DATA_TLB */
6771
6772
6773 RTGCPHYS GCPhysFirst;
6774 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6775 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6776 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6777
6778 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6779 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6780 if (fAccess & IEM_ACCESS_TYPE_READ)
6781 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6782
6783 void *pvMem;
6784 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6785 if (rcStrict == VINF_SUCCESS)
6786 { /* likely */ }
6787 else
6788 {
6789 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6790 if (rcStrict == VINF_SUCCESS)
6791 return pvMem;
6792 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6793 }
6794
6795#endif /* !IEM_WITH_DATA_TLB */
6796
6797 /*
6798 * Fill in the mapping table entry.
6799 */
6800 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6801 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6802 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6803 pVCpu->iem.s.cActiveMappings++;
6804
6805 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6806 return pvMem;
6807}
6808
6809
6810/**
6811 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6812 *
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param pvMem The mapping.
6815 * @param fAccess The kind of access.
6816 */
6817void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6818{
6819 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6820 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6821
6822 /* If it's bounce buffered, we may need to write back the buffer. */
6823 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6824 {
6825 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6826 {
6827 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6828 if (rcStrict == VINF_SUCCESS)
6829 return;
6830 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6831 }
6832 }
6833 /* Otherwise unlock it. */
6834 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6835 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6836
6837 /* Free the entry. */
6838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6839 Assert(pVCpu->iem.s.cActiveMappings != 0);
6840 pVCpu->iem.s.cActiveMappings--;
6841}
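/*
 * Illustrative usage sketch for the setjmp variants (not part of the original
 * source and excluded from compilation): iemMemMapJmp longjmps instead of
 * returning a failure status, so the happy path needs no checks.  The helper
 * name is hypothetical and IEM_ACCESS_DATA_W is assumed to be the usual
 * write+data access combination.
 */
# if 0 /* documentation-only sketch */
static void iemExampleStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                      uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* Any #GP, #SS, #PF, #AC or mapping trouble longjmps out of here. */
    uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                                 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
    *pu16Dst = u16Value;
    iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
}
# endif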
6842
6843#endif /* IEM_WITH_SETJMP */
6844
6845#ifndef IN_RING3
6846/**
6847 * Commits the guest memory if bounce buffered and unmaps it.  If any bounce
6848 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
6849 *
6850 * Allows the instruction to be completed and retired, while the IEM user will
6851 * return to ring-3 immediately afterwards and do the postponed writes there.
6852 *
6853 * @returns VBox status code (no strict statuses). Caller must check
6854 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param pvMem The mapping.
6857 * @param fAccess The kind of access.
6858 */
6859VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6860{
6861 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6862 AssertReturn(iMemMap >= 0, iMemMap);
6863
6864 /* If it's bounce buffered, we may need to write back the buffer. */
6865 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6866 {
6867 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6868 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6869 }
6870 /* Otherwise unlock it. */
6871 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6872 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6873
6874 /* Free the entry. */
6875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6876 Assert(pVCpu->iem.s.cActiveMappings != 0);
6877 pVCpu->iem.s.cActiveMappings--;
6878 return VINF_SUCCESS;
6879}
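/*
 * Illustrative caller-side sketch (not part of the original source and excluded
 * from compilation): after using the postponing variant the caller is expected
 * to check VMCPU_FF_IEM before repeating string instructions, since a set flag
 * means the postponed writes still have to be performed in ring-3.  The exact
 * status code used to get there is hypothetical here.
 */
# if 0 /* documentation-only sketch */
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
    if (   rcStrict == VINF_SUCCESS
        && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = VINF_EM_RAW_TO_R3; /* go to ring-3 so the postponed writes get flushed */
# endif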
6880#endif
6881
6882
6883/**
6884 * Rolls back mappings, releasing page locks and such.
6885 *
6886 * The caller shall only call this after checking cActiveMappings.
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 */
6890void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6891{
6892 Assert(pVCpu->iem.s.cActiveMappings > 0);
6893
6894 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6895 while (iMemMap-- > 0)
6896 {
6897 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6898 if (fAccess != IEM_ACCESS_INVALID)
6899 {
6900 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6902 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6903 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6904 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6905 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6906 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6907 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6908 pVCpu->iem.s.cActiveMappings--;
6909 }
6910 }
6911}
6912
6913
6914/**
6915 * Fetches a data byte.
6916 *
6917 * @returns Strict VBox status code.
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 * @param pu8Dst Where to return the byte.
6920 * @param iSegReg The index of the segment register to use for
6921 * this access. The base and limits are checked.
6922 * @param GCPtrMem The address of the guest memory.
6923 */
6924VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6925{
6926 /* The lazy approach for now... */
6927 uint8_t const *pu8Src;
6928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6929 if (rc == VINF_SUCCESS)
6930 {
6931 *pu8Dst = *pu8Src;
6932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6933 Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, *pu8Dst));
6934 }
6935 return rc;
6936}
6937
6938
6939#ifdef IEM_WITH_SETJMP
6940/**
6941 * Fetches a data byte, longjmp on error.
6942 *
6943 * @returns The byte.
6944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6945 * @param iSegReg The index of the segment register to use for
6946 * this access. The base and limits are checked.
6947 * @param GCPtrMem The address of the guest memory.
6948 */
6949uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6950{
6951 /* The lazy approach for now... */
6952 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6953 uint8_t const bRet = *pu8Src;
6954 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6955 Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, bRet));
6956 return bRet;
6957}
6958#endif /* IEM_WITH_SETJMP */
6959
6960
6961/**
6962 * Fetches a data word.
6963 *
6964 * @returns Strict VBox status code.
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pu16Dst Where to return the word.
6967 * @param iSegReg The index of the segment register to use for
6968 * this access. The base and limits are checked.
6969 * @param GCPtrMem The address of the guest memory.
6970 */
6971VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6972{
6973 /* The lazy approach for now... */
6974 uint16_t const *pu16Src;
6975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6976 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6977 if (rc == VINF_SUCCESS)
6978 {
6979 *pu16Dst = *pu16Src;
6980 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6981 Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, *pu16Dst));
6982 }
6983 return rc;
6984}
6985
6986
6987#ifdef IEM_WITH_SETJMP
6988/**
6989 * Fetches a data word, longjmp on error.
6990 *
6991 * @returns The word.
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param iSegReg The index of the segment register to use for
6994 * this access. The base and limits are checked.
6995 * @param GCPtrMem The address of the guest memory.
6996 */
6997uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6998{
6999 /* The lazy approach for now... */
7000 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7001 sizeof(*pu16Src) - 1);
7002 uint16_t const u16Ret = *pu16Src;
7003 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7004 Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Ret));
7005 return u16Ret;
7006}
7007#endif
7008
7009
7010/**
7011 * Fetches a data dword.
7012 *
7013 * @returns Strict VBox status code.
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 * @param pu32Dst Where to return the dword.
7016 * @param iSegReg The index of the segment register to use for
7017 * this access. The base and limits are checked.
7018 * @param GCPtrMem The address of the guest memory.
7019 */
7020VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7021{
7022 /* The lazy approach for now... */
7023 uint32_t const *pu32Src;
7024 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7025 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7026 if (rc == VINF_SUCCESS)
7027 {
7028 *pu32Dst = *pu32Src;
7029 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7030 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, *pu32Dst));
7031 }
7032 return rc;
7033}
7034
7035
7036/**
7037 * Fetches a data dword and zero extends it to a qword.
7038 *
7039 * @returns Strict VBox status code.
7040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7041 * @param pu64Dst Where to return the qword.
7042 * @param iSegReg The index of the segment register to use for
7043 * this access. The base and limits are checked.
7044 * @param GCPtrMem The address of the guest memory.
7045 */
7046VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7047{
7048 /* The lazy approach for now... */
7049 uint32_t const *pu32Src;
7050 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7051 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7052 if (rc == VINF_SUCCESS)
7053 {
7054 *pu64Dst = *pu32Src;
7055 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7056 Log9(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7057 }
7058 return rc;
7059}
7060
7061
7062#ifdef IEM_WITH_SETJMP
7063
7064/**
7065 * Fetches a data dword, longjmp on error, fallback/safe version.
7066 *
7067 * @returns The dword.
7068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7069 * @param iSegReg The index of the segment register to use for
7070 * this access. The base and limits are checked.
7071 * @param GCPtrMem The address of the guest memory.
7072 */
7073uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7074{
7075 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7076 sizeof(*pu32Src) - 1);
7077 uint32_t const u32Ret = *pu32Src;
7078 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7079 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7080 return u32Ret;
7081}
7082
7083
7084/**
7085 * Fetches a data dword, longjmp on error.
7086 *
7087 * @returns The dword.
7088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7089 * @param iSegReg The index of the segment register to use for
7090 * this access. The base and limits are checked.
7091 * @param GCPtrMem The address of the guest memory.
7092 */
7093uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7094{
7095# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7096 /*
7097     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7098 */
7099 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7100 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7101 {
7102 /*
7103 * TLB lookup.
7104 */
7105 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7106 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7107 if (pTlbe->uTag == uTag)
7108 {
7109 /*
7110 * Check TLB page table level access flags.
7111 */
7112 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7113 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7114 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7115 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7116 {
7117 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7118
7119 /*
7120 * Alignment check:
7121 */
7122 /** @todo check priority \#AC vs \#PF */
7123 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7124 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7125 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7126 || IEM_GET_CPL(pVCpu) != 3)
7127 {
7128 /*
7129 * Fetch and return the dword
7130 */
7131 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7132 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7133 uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7134 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7135 return u32Ret;
7136 }
7137 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7138 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7139 }
7140 }
7141 }
7142
7143    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7144 outdated page pointer, or other troubles. */
7145 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7146 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7147
7148# else
7149 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7150 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7151 uint32_t const u32Ret = *pu32Src;
7152 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7153 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7154 return u32Ret;
7155# endif
7156}
7157
7158/**
7159 * Fetches a data dword from a FLAT address, longjmp on error.
7160 *
7161 * @returns The dword.
7162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7163 * @param GCPtrMem The address of the guest memory.
7164 */
7165uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7166{
7167# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7168 /*
7169     * The address is already flat, so just check that it doesn't cross a page boundary.
7170 */
7171 RTGCPTR GCPtrEff = GCPtrMem;
7172 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7173 {
7174 /*
7175 * TLB lookup.
7176 */
7177 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7178 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7179 if (pTlbe->uTag == uTag)
7180 {
7181 /*
7182 * Check TLB page table level access flags.
7183 */
7184 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7185 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7186 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7187 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7188 {
7189 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7190
7191 /*
7192 * Alignment check:
7193 */
7194 /** @todo check priority \#AC vs \#PF */
7195 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7196 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7197 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7198 || IEM_GET_CPL(pVCpu) != 3)
7199 {
7200 /*
7201 * Fetch and return the dword
7202 */
7203 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7204 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7205 uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7206 Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
7207 return u32Ret;
7208 }
7209 Log10(("iemMemFlatFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7210 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7211 }
7212 }
7213 }
7214
7215    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7216 outdated page pointer, or other troubles. */
7217 Log10(("iemMemFlatFetchDataU32Jmp: %RGv fallback\n", GCPtrMem));
7218 return iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
7219
7220# else
7221 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), UINT8_MAX, GCPtrMem,
7222 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7223 uint32_t const u32Ret = *pu32Src;
7224 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7225 Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
7226 return u32Ret;
7227# endif
7228}
7229
7230#endif /* IEM_WITH_SETJMP */
7231
7232
7233#ifdef SOME_UNUSED_FUNCTION
7234/**
7235 * Fetches a data dword and sign extends it to a qword.
7236 *
7237 * @returns Strict VBox status code.
7238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7239 * @param pu64Dst Where to return the sign extended value.
7240 * @param iSegReg The index of the segment register to use for
7241 * this access. The base and limits are checked.
7242 * @param GCPtrMem The address of the guest memory.
7243 */
7244VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7245{
7246 /* The lazy approach for now... */
7247 int32_t const *pi32Src;
7248 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7249 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7250 if (rc == VINF_SUCCESS)
7251 {
7252 *pu64Dst = *pi32Src;
7253 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7254 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7255 }
7256#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7257 else
7258 *pu64Dst = 0;
7259#endif
7260 return rc;
7261}
7262#endif
7263
7264
7265/**
7266 * Fetches a data qword.
7267 *
7268 * @returns Strict VBox status code.
7269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7270 * @param pu64Dst Where to return the qword.
7271 * @param iSegReg The index of the segment register to use for
7272 * this access. The base and limits are checked.
7273 * @param GCPtrMem The address of the guest memory.
7274 */
7275VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7276{
7277 /* The lazy approach for now... */
7278 uint64_t const *pu64Src;
7279 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7280 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7281 if (rc == VINF_SUCCESS)
7282 {
7283 *pu64Dst = *pu64Src;
7284 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7285 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7286 }
7287 return rc;
7288}
7289
7290
7291#ifdef IEM_WITH_SETJMP
7292/**
7293 * Fetches a data qword, longjmp on error.
7294 *
7295 * @returns The qword.
7296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7297 * @param iSegReg The index of the segment register to use for
7298 * this access. The base and limits are checked.
7299 * @param GCPtrMem The address of the guest memory.
7300 */
7301uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7302{
7303 /* The lazy approach for now... */
7304 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7305 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7306 uint64_t const u64Ret = *pu64Src;
7307 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7308 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
7309 return u64Ret;
7310}
7311#endif
7312
7313
7314/**
7315 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7316 *
7317 * @returns Strict VBox status code.
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param pu64Dst Where to return the qword.
7320 * @param iSegReg The index of the segment register to use for
7321 * this access. The base and limits are checked.
7322 * @param GCPtrMem The address of the guest memory.
7323 */
7324VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7325{
7326 /* The lazy approach for now... */
7327 uint64_t const *pu64Src;
7328 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7329 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7330 if (rc == VINF_SUCCESS)
7331 {
7332 *pu64Dst = *pu64Src;
7333 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7334 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7335 }
7336 return rc;
7337}
7338
7339
7340#ifdef IEM_WITH_SETJMP
7341/**
7342 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7343 *
7344 * @returns The qword.
7345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7346 * @param iSegReg The index of the segment register to use for
7347 * this access. The base and limits are checked.
7348 * @param GCPtrMem The address of the guest memory.
7349 */
7350uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7351{
7352 /* The lazy approach for now... */
7353 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7354 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7355 uint64_t const u64Ret = *pu64Src;
7356 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7357 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
7358 return u64Ret;
7359}
7360#endif
7361
7362
7363/**
7364 * Fetches a data tword.
7365 *
7366 * @returns Strict VBox status code.
7367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7368 * @param pr80Dst Where to return the tword.
7369 * @param iSegReg The index of the segment register to use for
7370 * this access. The base and limits are checked.
7371 * @param GCPtrMem The address of the guest memory.
7372 */
7373VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7374{
7375 /* The lazy approach for now... */
7376 PCRTFLOAT80U pr80Src;
7377 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7378 if (rc == VINF_SUCCESS)
7379 {
7380 *pr80Dst = *pr80Src;
7381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7382 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7383 }
7384 return rc;
7385}
7386
7387
7388#ifdef IEM_WITH_SETJMP
7389/**
7390 * Fetches a data tword, longjmp on error.
7391 *
7392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7393 * @param pr80Dst Where to return the tword.
7394 * @param iSegReg The index of the segment register to use for
7395 * this access. The base and limits are checked.
7396 * @param GCPtrMem The address of the guest memory.
7397 */
7398void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7399{
7400 /* The lazy approach for now... */
7401 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7402 *pr80Dst = *pr80Src;
7403 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7404 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7405}
7406#endif
7407
7408
7409/**
7410 * Fetches a data decimal tword.
7411 *
7412 * @returns Strict VBox status code.
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 * @param pd80Dst Where to return the tword.
7415 * @param iSegReg The index of the segment register to use for
7416 * this access. The base and limits are checked.
7417 * @param GCPtrMem The address of the guest memory.
7418 */
7419VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7420{
7421 /* The lazy approach for now... */
7422 PCRTPBCD80U pd80Src;
7423 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7424 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7425 if (rc == VINF_SUCCESS)
7426 {
7427 *pd80Dst = *pd80Src;
7428 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7429 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7430 }
7431 return rc;
7432}
7433
7434
7435#ifdef IEM_WITH_SETJMP
7436/**
7437 * Fetches a data decimal tword, longjmp on error.
7438 *
7439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7440 * @param pd80Dst Where to return the tword.
7441 * @param iSegReg The index of the segment register to use for
7442 * this access. The base and limits are checked.
7443 * @param GCPtrMem The address of the guest memory.
7444 */
7445void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7446{
7447 /* The lazy approach for now... */
7448 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7449 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7450 *pd80Dst = *pd80Src;
7451 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7452 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7453}
7454#endif
7455
7456
7457/**
7458 * Fetches a data dqword (double qword), generally SSE related.
7459 *
7460 * @returns Strict VBox status code.
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 * @param pu128Dst Where to return the dqword.
7463 * @param iSegReg The index of the segment register to use for
7464 * this access. The base and limits are checked.
7465 * @param GCPtrMem The address of the guest memory.
7466 */
7467VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7468{
7469 /* The lazy approach for now... */
7470 PCRTUINT128U pu128Src;
7471 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7472 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7473 if (rc == VINF_SUCCESS)
7474 {
7475 pu128Dst->au64[0] = pu128Src->au64[0];
7476 pu128Dst->au64[1] = pu128Src->au64[1];
7477 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7478 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7479 }
7480 return rc;
7481}
7482
7483
7484#ifdef IEM_WITH_SETJMP
7485/**
7486 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
7487 *
7488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7489 * @param pu128Dst Where to return the dqword.
7490 * @param iSegReg The index of the segment register to use for
7491 * this access. The base and limits are checked.
7492 * @param GCPtrMem The address of the guest memory.
7493 */
7494void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7495{
7496 /* The lazy approach for now... */
7497 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7498 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7499 pu128Dst->au64[0] = pu128Src->au64[0];
7500 pu128Dst->au64[1] = pu128Src->au64[1];
7501 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7502 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7503}
7504#endif
7505
7506
7507/**
7508 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7509 * related.
7510 *
7511 * Raises \#GP(0) if not aligned.
7512 *
7513 * @returns Strict VBox status code.
7514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7515 * @param pu128Dst Where to return the dqword.
7516 * @param iSegReg The index of the segment register to use for
7517 * this access. The base and limits are checked.
7518 * @param GCPtrMem The address of the guest memory.
7519 */
7520VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7521{
7522 /* The lazy approach for now... */
7523 PCRTUINT128U pu128Src;
7524 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7525 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7526 if (rc == VINF_SUCCESS)
7527 {
7528 pu128Dst->au64[0] = pu128Src->au64[0];
7529 pu128Dst->au64[1] = pu128Src->au64[1];
7530 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7531 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7532 }
7533 return rc;
7534}
7535
7536
7537#ifdef IEM_WITH_SETJMP
7538/**
7539 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7540 * related, longjmp on error.
7541 *
7542 * Raises \#GP(0) if not aligned.
7543 *
7544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7545 * @param pu128Dst Where to return the dqword.
7546 * @param iSegReg The index of the segment register to use for
7547 * this access. The base and limits are checked.
7548 * @param GCPtrMem The address of the guest memory.
7549 */
7550void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7551 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7552{
7553 /* The lazy approach for now... */
7554 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7555 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7556 pu128Dst->au64[0] = pu128Src->au64[0];
7557 pu128Dst->au64[1] = pu128Src->au64[1];
7558 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7559 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7560}
7561#endif
7562
7563
7564/**
7565 * Fetches a data qqword (quad qword, 256 bits), generally AVX related.
7566 *
7567 * @returns Strict VBox status code.
7568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7569 * @param pu256Dst Where to return the qqword.
7570 * @param iSegReg The index of the segment register to use for
7571 * this access. The base and limits are checked.
7572 * @param GCPtrMem The address of the guest memory.
7573 */
7574VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7575{
7576 /* The lazy approach for now... */
7577 PCRTUINT256U pu256Src;
7578 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7579 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7580 if (rc == VINF_SUCCESS)
7581 {
7582 pu256Dst->au64[0] = pu256Src->au64[0];
7583 pu256Dst->au64[1] = pu256Src->au64[1];
7584 pu256Dst->au64[2] = pu256Src->au64[2];
7585 pu256Dst->au64[3] = pu256Src->au64[3];
7586 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7587 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7588 }
7589 return rc;
7590}
7591
7592
7593#ifdef IEM_WITH_SETJMP
7594/**
7595 * Fetches a data qqword (quad qword, 256 bits), generally AVX related, longjmp on error.
7596 *
7597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7598 * @param pu256Dst Where to return the qqword.
7599 * @param iSegReg The index of the segment register to use for
7600 * this access. The base and limits are checked.
7601 * @param GCPtrMem The address of the guest memory.
7602 */
7603void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7604{
7605 /* The lazy approach for now... */
7606 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7607 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7608 pu256Dst->au64[0] = pu256Src->au64[0];
7609 pu256Dst->au64[1] = pu256Src->au64[1];
7610 pu256Dst->au64[2] = pu256Src->au64[2];
7611 pu256Dst->au64[3] = pu256Src->au64[3];
7612 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7613 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7614}
7615#endif
7616
7617
7618/**
7619 * Fetches a data qqword (quad qword, 256 bits) at an aligned address, generally AVX
7620 * related.
7621 *
7622 * Raises \#GP(0) if not aligned.
7623 *
7624 * @returns Strict VBox status code.
7625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7626 * @param pu256Dst Where to return the qqword.
7627 * @param iSegReg The index of the segment register to use for
7628 * this access. The base and limits are checked.
7629 * @param GCPtrMem The address of the guest memory.
7630 */
7631VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7632{
7633 /* The lazy approach for now... */
7634 PCRTUINT256U pu256Src;
7635 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7636 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7637 if (rc == VINF_SUCCESS)
7638 {
7639 pu256Dst->au64[0] = pu256Src->au64[0];
7640 pu256Dst->au64[1] = pu256Src->au64[1];
7641 pu256Dst->au64[2] = pu256Src->au64[2];
7642 pu256Dst->au64[3] = pu256Src->au64[3];
7643 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7644 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7645 }
7646 return rc;
7647}
7648
7649
7650#ifdef IEM_WITH_SETJMP
7651/**
7652 * Fetches a data qqword (quad qword, 256 bits) at an aligned address, generally AVX
7653 * related, longjmp on error.
7654 *
7655 * Raises \#GP(0) if not aligned.
7656 *
7657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7658 * @param pu256Dst Where to return the qqword.
7659 * @param iSegReg The index of the segment register to use for
7660 * this access. The base and limits are checked.
7661 * @param GCPtrMem The address of the guest memory.
7662 */
7663void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7664 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7665{
7666 /* The lazy approach for now... */
7667 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7668 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7669 pu256Dst->au64[0] = pu256Src->au64[0];
7670 pu256Dst->au64[1] = pu256Src->au64[1];
7671 pu256Dst->au64[2] = pu256Src->au64[2];
7672 pu256Dst->au64[3] = pu256Src->au64[3];
7673 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7674 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7675}
7676#endif
7677
7678
7679
7680/**
7681 * Fetches a descriptor register (lgdt, lidt).
7682 *
7683 * @returns Strict VBox status code.
7684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7685 * @param pcbLimit Where to return the limit.
7686 * @param pGCPtrBase Where to return the base.
7687 * @param iSegReg The index of the segment register to use for
7688 * this access. The base and limits are checked.
7689 * @param GCPtrMem The address of the guest memory.
7690 * @param enmOpSize The effective operand size.
7691 */
7692VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7693 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7694{
7695 /*
7696 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7697 * little special:
7698 * - The two reads are done separately.
7699 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7700 * - We suspect the 386 to actually commit the limit before the base in
7701 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7702 * don't try to emulate this eccentric behavior, because it's not well
7703 * enough understood and rather hard to trigger.
7704 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7705 */
7706 VBOXSTRICTRC rcStrict;
7707 if (IEM_IS_64BIT_CODE(pVCpu))
7708 {
7709 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7710 if (rcStrict == VINF_SUCCESS)
7711 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7712 }
7713 else
7714 {
7715 uint32_t uTmp = 0; /* (silences a Visual C++ may-be-used-uninitialized warning) */
7716 if (enmOpSize == IEMMODE_32BIT)
7717 {
7718 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7719 {
7720 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7721 if (rcStrict == VINF_SUCCESS)
7722 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7723 }
7724 else
7725 {
7726 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7727 if (rcStrict == VINF_SUCCESS)
7728 {
7729 *pcbLimit = (uint16_t)uTmp;
7730 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7731 }
7732 }
7733 if (rcStrict == VINF_SUCCESS)
7734 *pGCPtrBase = uTmp;
7735 }
7736 else
7737 {
7738 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7739 if (rcStrict == VINF_SUCCESS)
7740 {
7741 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7742 if (rcStrict == VINF_SUCCESS)
7743 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7744 }
7745 }
7746 }
7747 return rcStrict;
7748}
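
/*
 * Worked example (illustrative only, not part of the build): the 16-bit
 * operand size path above discards the top byte of the dword read at
 * GCPtrMem + 2, so only a 24-bit base survives.
 */
#if 0
static void iemExampleLgdt16BitBaseMask(void)
{
    uint32_t const uRawBase = UINT32_C(0x12345678);            /* dword read at GCPtrMem + 2 */
    uint32_t const uBase    = uRawBase & UINT32_C(0x00ffffff); /* -> 0x00345678 */
    RT_NOREF(uBase);
}
#endif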
7749
7750
7751
7752/**
7753 * Stores a data byte.
7754 *
7755 * @returns Strict VBox status code.
7756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7757 * @param iSegReg The index of the segment register to use for
7758 * this access. The base and limits are checked.
7759 * @param GCPtrMem The address of the guest memory.
7760 * @param u8Value The value to store.
7761 */
7762VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7763{
7764 /* The lazy approach for now... */
7765 uint8_t *pu8Dst;
7766 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7767 if (rc == VINF_SUCCESS)
7768 {
7769 *pu8Dst = u8Value;
7770 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7771 Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
7772 }
7773 return rc;
7774}
7775
7776
7777#ifdef IEM_WITH_SETJMP
7778/**
7779 * Stores a data byte, longjmp on error.
7780 *
7781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7782 * @param iSegReg The index of the segment register to use for
7783 * this access. The base and limits are checked.
7784 * @param GCPtrMem The address of the guest memory.
7785 * @param u8Value The value to store.
7786 */
7787void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7788{
7789 /* The lazy approach for now... */
7790 Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
7791 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7792 *pu8Dst = u8Value;
7793 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7794}
7795#endif
7796
7797
7798/**
7799 * Stores a data word.
7800 *
7801 * @returns Strict VBox status code.
7802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7803 * @param iSegReg The index of the segment register to use for
7804 * this access. The base and limits are checked.
7805 * @param GCPtrMem The address of the guest memory.
7806 * @param u16Value The value to store.
7807 */
7808VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7809{
7810 /* The lazy approach for now... */
7811 uint16_t *pu16Dst;
7812 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7813 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7814 if (rc == VINF_SUCCESS)
7815 {
7816 *pu16Dst = u16Value;
7817 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7818 Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
7819 }
7820 return rc;
7821}
7822
7823
7824#ifdef IEM_WITH_SETJMP
7825/**
7826 * Stores a data word, longjmp on error.
7827 *
7828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7829 * @param iSegReg The index of the segment register to use for
7830 * this access. The base and limits are checked.
7831 * @param GCPtrMem The address of the guest memory.
7832 * @param u16Value The value to store.
7833 */
7834void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7835{
7836 /* The lazy approach for now... */
7837 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7838 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7839 *pu16Dst = u16Value;
7840 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7841 Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
7842}
7843#endif
7844
7845
7846/**
7847 * Stores a data dword.
7848 *
7849 * @returns Strict VBox status code.
7850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7851 * @param iSegReg The index of the segment register to use for
7852 * this access. The base and limits are checked.
7853 * @param GCPtrMem The address of the guest memory.
7854 * @param u32Value The value to store.
7855 */
7856VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7857{
7858 /* The lazy approach for now... */
7859 uint32_t *pu32Dst;
7860 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7861 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7862 if (rc == VINF_SUCCESS)
7863 {
7864 *pu32Dst = u32Value;
7865 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7866 Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
7867 }
7868 return rc;
7869}
7870
7871
7872#ifdef IEM_WITH_SETJMP
7873/**
7874 * Stores a data dword, longjmp on error.
7875 *
7877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7878 * @param iSegReg The index of the segment register to use for
7879 * this access. The base and limits are checked.
7880 * @param GCPtrMem The address of the guest memory.
7881 * @param u32Value The value to store.
7882 */
7883void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7884{
7885 /* The lazy approach for now... */
7886 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7887 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7888 *pu32Dst = u32Value;
7889 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7890 Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
7891}
7892#endif
7893
7894
7895/**
7896 * Stores a data qword.
7897 *
7898 * @returns Strict VBox status code.
7899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7900 * @param iSegReg The index of the segment register to use for
7901 * this access. The base and limits are checked.
7902 * @param GCPtrMem The address of the guest memory.
7903 * @param u64Value The value to store.
7904 */
7905VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7906{
7907 /* The lazy approach for now... */
7908 uint64_t *pu64Dst;
7909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7910 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7911 if (rc == VINF_SUCCESS)
7912 {
7913 *pu64Dst = u64Value;
7914 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7915 Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
7916 }
7917 return rc;
7918}
7919
7920
7921#ifdef IEM_WITH_SETJMP
7922/**
7923 * Stores a data qword, longjmp on error.
7924 *
7925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7926 * @param iSegReg The index of the segment register to use for
7927 * this access. The base and limits are checked.
7928 * @param GCPtrMem The address of the guest memory.
7929 * @param u64Value The value to store.
7930 */
7931void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7932{
7933 /* The lazy approach for now... */
7934 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7935 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7936 *pu64Dst = u64Value;
7937 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7938 Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
7939}
7940#endif
7941
7942
7943/**
7944 * Stores a data dqword.
7945 *
7946 * @returns Strict VBox status code.
7947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7948 * @param iSegReg The index of the segment register to use for
7949 * this access. The base and limits are checked.
7950 * @param GCPtrMem The address of the guest memory.
7951 * @param u128Value The value to store.
7952 */
7953VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7954{
7955 /* The lazy approach for now... */
7956 PRTUINT128U pu128Dst;
7957 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7958 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7959 if (rc == VINF_SUCCESS)
7960 {
7961 pu128Dst->au64[0] = u128Value.au64[0];
7962 pu128Dst->au64[1] = u128Value.au64[1];
7963 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7964 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7965 }
7966 return rc;
7967}
7968
7969
7970#ifdef IEM_WITH_SETJMP
7971/**
7972 * Stores a data dqword, longjmp on error.
7973 *
7974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7975 * @param iSegReg The index of the segment register to use for
7976 * this access. The base and limits are checked.
7977 * @param GCPtrMem The address of the guest memory.
7978 * @param u128Value The value to store.
7979 */
7980void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7981{
7982 /* The lazy approach for now... */
7983 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7984 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7985 pu128Dst->au64[0] = u128Value.au64[0];
7986 pu128Dst->au64[1] = u128Value.au64[1];
7987 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7988 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7989}
7990#endif
7991
7992
7993/**
7994 * Stores a data dqword, SSE aligned.
7995 *
7996 * @returns Strict VBox status code.
7997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7998 * @param iSegReg The index of the segment register to use for
7999 * this access. The base and limits are checked.
8000 * @param GCPtrMem The address of the guest memory.
8001 * @param u128Value The value to store.
8002 */
8003VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
8004{
8005 /* The lazy approach for now... */
8006 PRTUINT128U pu128Dst;
8007 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8008 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8009 if (rc == VINF_SUCCESS)
8010 {
8011 pu128Dst->au64[0] = u128Value.au64[0];
8012 pu128Dst->au64[1] = u128Value.au64[1];
8013 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
8014 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8015 }
8016 return rc;
8017}
8018
8019
8020#ifdef IEM_WITH_SETJMP
8021/**
8022 * Stores a data dqword, SSE aligned, longjmp on error.
8023 *
8025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8026 * @param iSegReg The index of the segment register to use for
8027 * this access. The base and limits are checked.
8028 * @param GCPtrMem The address of the guest memory.
8029 * @param u128Value The value to store.
8030 */
8031void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8032 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
8033{
8034 /* The lazy approach for now... */
8035 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8036 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8037 pu128Dst->au64[0] = u128Value.au64[0];
8038 pu128Dst->au64[1] = u128Value.au64[1];
8039 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
8040 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8041}
8042#endif
8043
8044
8045/**
8046 * Stores a data qqword (256 bits).
8047 *
8048 * @returns Strict VBox status code.
8049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8050 * @param iSegReg The index of the segment register to use for
8051 * this access. The base and limits are checked.
8052 * @param GCPtrMem The address of the guest memory.
8053 * @param pu256Value Pointer to the value to store.
8054 */
8055VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8056{
8057 /* The lazy approach for now... */
8058 PRTUINT256U pu256Dst;
8059 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8060 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8061 if (rc == VINF_SUCCESS)
8062 {
8063 pu256Dst->au64[0] = pu256Value->au64[0];
8064 pu256Dst->au64[1] = pu256Value->au64[1];
8065 pu256Dst->au64[2] = pu256Value->au64[2];
8066 pu256Dst->au64[3] = pu256Value->au64[3];
8067 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8068 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8069 }
8070 return rc;
8071}
8072
8073
8074#ifdef IEM_WITH_SETJMP
8075/**
8076 * Stores a data qqword (256 bits), longjmp on error.
8077 *
8078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8079 * @param iSegReg The index of the segment register to use for
8080 * this access. The base and limits are checked.
8081 * @param GCPtrMem The address of the guest memory.
8082 * @param pu256Value Pointer to the value to store.
8083 */
8084void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8085{
8086 /* The lazy approach for now... */
8087 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8088 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8089 pu256Dst->au64[0] = pu256Value->au64[0];
8090 pu256Dst->au64[1] = pu256Value->au64[1];
8091 pu256Dst->au64[2] = pu256Value->au64[2];
8092 pu256Dst->au64[3] = pu256Value->au64[3];
8093 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8094 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8095}
8096#endif
8097
8098
8099/**
8100 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
8101 *
8102 * @returns Strict VBox status code.
8103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8104 * @param iSegReg The index of the segment register to use for
8105 * this access. The base and limits are checked.
8106 * @param GCPtrMem The address of the guest memory.
8107 * @param pu256Value Pointer to the value to store.
8108 */
8109VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8110{
8111 /* The lazy approach for now... */
8112 PRTUINT256U pu256Dst;
8113 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8114 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8115 if (rc == VINF_SUCCESS)
8116 {
8117 pu256Dst->au64[0] = pu256Value->au64[0];
8118 pu256Dst->au64[1] = pu256Value->au64[1];
8119 pu256Dst->au64[2] = pu256Value->au64[2];
8120 pu256Dst->au64[3] = pu256Value->au64[3];
8121 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8122 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8123 }
8124 return rc;
8125}
8126
8127
8128#ifdef IEM_WITH_SETJMP
8129/**
8130 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
8131 *
8133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8134 * @param iSegReg The index of the segment register to use for
8135 * this access. The base and limits are checked.
8136 * @param GCPtrMem The address of the guest memory.
8137 * @param pu256Value Pointer to the value to store.
8138 */
8139void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8140 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8141{
8142 /* The lazy approach for now... */
8143 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8144 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8145 pu256Dst->au64[0] = pu256Value->au64[0];
8146 pu256Dst->au64[1] = pu256Value->au64[1];
8147 pu256Dst->au64[2] = pu256Value->au64[2];
8148 pu256Dst->au64[3] = pu256Value->au64[3];
8149 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8150 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8151}
8152#endif
8153
8154
8155/**
8156 * Stores a descriptor register (sgdt, sidt).
8157 *
8158 * @returns Strict VBox status code.
8159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8160 * @param cbLimit The limit.
8161 * @param GCPtrBase The base address.
8162 * @param iSegReg The index of the segment register to use for
8163 * this access. The base and limits are checked.
8164 * @param GCPtrMem The address of the guest memory.
8165 */
8166VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8167{
8168 /*
8169 * The SIDT and SGDT instructions actually store the data using two
8170 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8171 * do not respond to operand size prefixes.
8172 */
8173 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8174 if (rcStrict == VINF_SUCCESS)
8175 {
8176 if (IEM_IS_16BIT_CODE(pVCpu))
8177 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8178 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8179 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8180 else if (IEM_IS_32BIT_CODE(pVCpu))
8181 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8182 else
8183 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8184 }
8185 return rcStrict;
8186}
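
/*
 * Worked example (illustrative only, not part of the build): on a 286 target
 * the 16-bit code path above fills the unused top byte of the base with 0xFF,
 * whereas later CPUs store the 24-bit base as-is.
 */
#if 0
static uint32_t iemExampleSgdt286Base(uint32_t uBase24)
{
    return uBase24 | UINT32_C(0xff000000); /* 0x00345678 -> 0xff345678 */
}
#endif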
8187
8188
8189/**
8190 * Pushes a word onto the stack.
8191 *
8192 * @returns Strict VBox status code.
8193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8194 * @param u16Value The value to push.
8195 */
8196VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8197{
8198 /* Decrement the stack pointer. */
8199 uint64_t uNewRsp;
8200 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8201
8202 /* Write the word the lazy way. */
8203 uint16_t *pu16Dst;
8204 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8205 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8206 if (rc == VINF_SUCCESS)
8207 {
8208 *pu16Dst = u16Value;
8209 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8210 }
8211
8212 /* Commit the new RSP value unless an access handler made trouble. */
8213 if (rc == VINF_SUCCESS)
8214 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8215
8216 return rc;
8217}
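
/*
 * Sketch (illustration only, not part of the build) of how the stack pointer
 * moves for the push helpers in this file: iemRegGetRspForPush subtracts the
 * operand size and truncates according to the current stack attributes.  The
 * 16-bit case assumes the upper ESP bits are left untouched.
 */
#if 0
static uint64_t iemExampleRspAfterPush(uint64_t uOldRsp, uint8_t cbItem, bool f64Bit, bool fSsDefBig)
{
    if (f64Bit)                          /* 64-bit mode: full RSP, no truncation. */
        return uOldRsp - cbItem;
    if (fSsDefBig)                       /* 32-bit stack segment: ESP wraps at 32 bits. */
        return (uint32_t)(uOldRsp - cbItem);
    /* 16-bit stack segment: only SP changes. */
    return (uOldRsp & ~(uint64_t)UINT16_MAX) | (uint16_t)((uint16_t)uOldRsp - cbItem);
}
#endif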
8218
8219
8220/**
8221 * Pushes a dword onto the stack.
8222 *
8223 * @returns Strict VBox status code.
8224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8225 * @param u32Value The value to push.
8226 */
8227VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8228{
8229 /* Decrement the stack pointer. */
8230 uint64_t uNewRsp;
8231 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8232
8233 /* Write the dword the lazy way. */
8234 uint32_t *pu32Dst;
8235 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8236 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8237 if (rc == VINF_SUCCESS)
8238 {
8239 *pu32Dst = u32Value;
8240 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8241 }
8242
8243 /* Commit the new RSP value unless an access handler made trouble. */
8244 if (rc == VINF_SUCCESS)
8245 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8246
8247 return rc;
8248}
8249
8250
8251/**
8252 * Pushes a dword segment register value onto the stack.
8253 *
8254 * @returns Strict VBox status code.
8255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8256 * @param u32Value The value to push.
8257 */
8258VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8259{
8260 /* Decrement the stack pointer. */
8261 uint64_t uNewRsp;
8262 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8263
8264 /* The intel docs talk about zero extending the selector register
8265 value. My actual intel CPU here might be zero extending the value
8266 but it still only writes the lower word... */
8267 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8268 * happens when crossing an electric page boundary, is the high word checked
8269 * for write accessibility or not? Probably it is. What about segment limits?
8270 * It appears this behavior is also shared with trap error codes.
8271 *
8272 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
8273 * on ancient hardware to see when it actually changed. */
8274 uint16_t *pu16Dst;
8275 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8276 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8277 if (rc == VINF_SUCCESS)
8278 {
8279 *pu16Dst = (uint16_t)u32Value;
8280 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8281 }
8282
8283 /* Commit the new RSP value unless an access handler made trouble. */
8284 if (rc == VINF_SUCCESS)
8285 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8286
8287 return rc;
8288}
8289
8290
8291/**
8292 * Pushes a qword onto the stack.
8293 *
8294 * @returns Strict VBox status code.
8295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8296 * @param u64Value The value to push.
8297 */
8298VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8299{
8300 /* Decrement the stack pointer. */
8301 uint64_t uNewRsp;
8302 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8303
8304 /* Write the qword the lazy way. */
8305 uint64_t *pu64Dst;
8306 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8307 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8308 if (rc == VINF_SUCCESS)
8309 {
8310 *pu64Dst = u64Value;
8311 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8312 }
8313
8314 /* Commit the new RSP value unless an access handler made trouble. */
8315 if (rc == VINF_SUCCESS)
8316 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8317
8318 return rc;
8319}
8320
8321
8322/**
8323 * Pops a word from the stack.
8324 *
8325 * @returns Strict VBox status code.
8326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8327 * @param pu16Value Where to store the popped value.
8328 */
8329VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8330{
8331 /* Increment the stack pointer. */
8332 uint64_t uNewRsp;
8333 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8334
8335 /* Fetch the word the lazy way. */
8336 uint16_t const *pu16Src;
8337 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8338 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8339 if (rc == VINF_SUCCESS)
8340 {
8341 *pu16Value = *pu16Src;
8342 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8343
8344 /* Commit the new RSP value. */
8345 if (rc == VINF_SUCCESS)
8346 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8347 }
8348
8349 return rc;
8350}
8351
8352
8353/**
8354 * Pops a dword from the stack.
8355 *
8356 * @returns Strict VBox status code.
8357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8358 * @param pu32Value Where to store the popped value.
8359 */
8360VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8361{
8362 /* Increment the stack pointer. */
8363 uint64_t uNewRsp;
8364 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8365
8366 /* Fetch the dword the lazy way. */
8367 uint32_t const *pu32Src;
8368 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8369 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8370 if (rc == VINF_SUCCESS)
8371 {
8372 *pu32Value = *pu32Src;
8373 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8374
8375 /* Commit the new RSP value. */
8376 if (rc == VINF_SUCCESS)
8377 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8378 }
8379
8380 return rc;
8381}
8382
8383
8384/**
8385 * Pops a qword from the stack.
8386 *
8387 * @returns Strict VBox status code.
8388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8389 * @param pu64Value Where to store the popped value.
8390 */
8391VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8392{
8393 /* Increment the stack pointer. */
8394 uint64_t uNewRsp;
8395 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8396
8397 /* Fetch the qword the lazy way. */
8398 uint64_t const *pu64Src;
8399 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8400 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8401 if (rc == VINF_SUCCESS)
8402 {
8403 *pu64Value = *pu64Src;
8404 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8405
8406 /* Commit the new RSP value. */
8407 if (rc == VINF_SUCCESS)
8408 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8409 }
8410
8411 return rc;
8412}
8413
8414
8415/**
8416 * Pushes a word onto the stack, using a temporary stack pointer.
8417 *
8418 * @returns Strict VBox status code.
8419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8420 * @param u16Value The value to push.
8421 * @param pTmpRsp Pointer to the temporary stack pointer.
8422 */
8423VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8424{
8425 /* Decrement the stack pointer. */
8426 RTUINT64U NewRsp = *pTmpRsp;
8427 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8428
8429 /* Write the word the lazy way. */
8430 uint16_t *pu16Dst;
8431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8432 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8433 if (rc == VINF_SUCCESS)
8434 {
8435 *pu16Dst = u16Value;
8436 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8437 }
8438
8439 /* Commit the new RSP value unless an access handler made trouble. */
8440 if (rc == VINF_SUCCESS)
8441 *pTmpRsp = NewRsp;
8442
8443 return rc;
8444}
8445
8446
8447/**
8448 * Pushes a dword onto the stack, using a temporary stack pointer.
8449 *
8450 * @returns Strict VBox status code.
8451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8452 * @param u32Value The value to push.
8453 * @param pTmpRsp Pointer to the temporary stack pointer.
8454 */
8455VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8456{
8457 /* Decrement the stack pointer. */
8458 RTUINT64U NewRsp = *pTmpRsp;
8459 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8460
8461 /* Write the dword the lazy way. */
8462 uint32_t *pu32Dst;
8463 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8464 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8465 if (rc == VINF_SUCCESS)
8466 {
8467 *pu32Dst = u32Value;
8468 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8469 }
8470
8471 /* Commit the new RSP value unless an access handler made trouble. */
8472 if (rc == VINF_SUCCESS)
8473 *pTmpRsp = NewRsp;
8474
8475 return rc;
8476}
8477
8478
8479/**
8480 * Pushes a qword onto the stack, using a temporary stack pointer.
8481 *
8482 * @returns Strict VBox status code.
8483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8484 * @param u64Value The value to push.
8485 * @param pTmpRsp Pointer to the temporary stack pointer.
8486 */
8487VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8488{
8489 /* Decrement the stack pointer. */
8490 RTUINT64U NewRsp = *pTmpRsp;
8491 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8492
8493 /* Write the qword the lazy way. */
8494 uint64_t *pu64Dst;
8495 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8496 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8497 if (rc == VINF_SUCCESS)
8498 {
8499 *pu64Dst = u64Value;
8500 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8501 }
8502
8503 /* Commit the new RSP value unless an access handler made trouble. */
8504 if (rc == VINF_SUCCESS)
8505 *pTmpRsp = NewRsp;
8506
8507 return rc;
8508}
8509
8510
8511/**
8512 * Pops a word from the stack, using a temporary stack pointer.
8513 *
8514 * @returns Strict VBox status code.
8515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8516 * @param pu16Value Where to store the popped value.
8517 * @param pTmpRsp Pointer to the temporary stack pointer.
8518 */
8519VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8520{
8521 /* Increment the stack pointer. */
8522 RTUINT64U NewRsp = *pTmpRsp;
8523 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8524
8525 /* Fetch the word the lazy way. */
8526 uint16_t const *pu16Src;
8527 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8528 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8529 if (rc == VINF_SUCCESS)
8530 {
8531 *pu16Value = *pu16Src;
8532 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8533
8534 /* Commit the new RSP value. */
8535 if (rc == VINF_SUCCESS)
8536 *pTmpRsp = NewRsp;
8537 }
8538
8539 return rc;
8540}
8541
8542
8543/**
8544 * Pops a dword from the stack, using a temporary stack pointer.
8545 *
8546 * @returns Strict VBox status code.
8547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8548 * @param pu32Value Where to store the popped value.
8549 * @param pTmpRsp Pointer to the temporary stack pointer.
8550 */
8551VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8552{
8553 /* Increment the stack pointer. */
8554 RTUINT64U NewRsp = *pTmpRsp;
8555 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8556
8557 /* Fetch the dword the lazy way. */
8558 uint32_t const *pu32Src;
8559 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8560 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8561 if (rc == VINF_SUCCESS)
8562 {
8563 *pu32Value = *pu32Src;
8564 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8565
8566 /* Commit the new RSP value. */
8567 if (rc == VINF_SUCCESS)
8568 *pTmpRsp = NewRsp;
8569 }
8570
8571 return rc;
8572}
8573
8574
8575/**
8576 * Pops a qword from the stack, using a temporary stack pointer.
8577 *
8578 * @returns Strict VBox status code.
8579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8580 * @param pu64Value Where to store the popped value.
8581 * @param pTmpRsp Pointer to the temporary stack pointer.
8582 */
8583VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8584{
8585 /* Increment the stack pointer. */
8586 RTUINT64U NewRsp = *pTmpRsp;
8587 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8588
8589 /* Fetch the qword the lazy way. */
8590 uint64_t const *pu64Src;
8591 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8592 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8593 if (rcStrict == VINF_SUCCESS)
8594 {
8595 *pu64Value = *pu64Src;
8596 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8597
8598 /* Commit the new RSP value. */
8599 if (rcStrict == VINF_SUCCESS)
8600 *pTmpRsp = NewRsp;
8601 }
8602
8603 return rcStrict;
8604}
8605
8606
8607/**
8608 * Begin a special stack push (used by interrupts, exceptions and such).
8609 *
8610 * This will raise \#SS or \#PF if appropriate.
8611 *
8612 * @returns Strict VBox status code.
8613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8614 * @param cbMem The number of bytes to push onto the stack.
8615 * @param cbAlign The alignment mask (7, 3, 1).
8616 * @param ppvMem Where to return the pointer to the stack memory.
8617 * As with the other memory functions this could be
8618 * direct access or bounce buffered access, so
8619 * don't commit register changes until the commit call
8620 * succeeds.
8621 * @param puNewRsp Where to return the new RSP value. This must be
8622 * passed unchanged to
8623 * iemMemStackPushCommitSpecial().
8624 */
8625VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8626 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8627{
8628 Assert(cbMem < UINT8_MAX);
8629 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8630 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8631 IEM_ACCESS_STACK_W, cbAlign);
8632}
8633
8634
8635/**
8636 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8637 *
8638 * This will update the rSP.
8639 *
8640 * @returns Strict VBox status code.
8641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8642 * @param pvMem The pointer returned by
8643 * iemMemStackPushBeginSpecial().
8644 * @param uNewRsp The new RSP value returned by
8645 * iemMemStackPushBeginSpecial().
8646 */
8647VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8648{
8649 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8650 if (rcStrict == VINF_SUCCESS)
8651 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8652 return rcStrict;
8653}
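
/*
 * Hypothetical caller sketch (illustration only): how an exception dispatcher
 * might use the special push API above.  The real callers live elsewhere in
 * IEM; the function name, frame layout and error handling here are simplified
 * assumptions.
 */
#if 0
static VBOXSTRICTRC iemExamplePushThreeDwords(PVMCPUCC pVCpu, uint32_t u1, uint32_t u2, uint32_t u3)
{
    void        *pvStackFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 12, 3 /*cbAlign*/, &pvStackFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t *pau32Frame = (uint32_t *)pvStackFrame;
    pau32Frame[0] = u3; /* lowest address = the value pushed last */
    pau32Frame[1] = u2;
    pau32Frame[2] = u1;
    return iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp); /* commits RSP only on success */
}
#endif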
8654
8655
8656/**
8657 * Begin a special stack pop (used by iret, retf and such).
8658 *
8659 * This will raise \#SS or \#PF if appropriate.
8660 *
8661 * @returns Strict VBox status code.
8662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8663 * @param cbMem The number of bytes to pop from the stack.
8664 * @param cbAlign The alignment mask (7, 3, 1).
8665 * @param ppvMem Where to return the pointer to the stack memory.
8666 * @param puNewRsp Where to return the new RSP value. This must be
8667 * assigned to CPUMCTX::rsp manually some time
8668 * after iemMemStackPopDoneSpecial() has been
8669 * called.
8670 */
8671VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8672 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8673{
8674 Assert(cbMem < UINT8_MAX);
8675 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8676 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8677}
8678
8679
8680/**
8681 * Continue a special stack pop (used by iret and retf), for the purpose of
8682 * retrieving a new stack pointer.
8683 *
8684 * This will raise \#SS or \#PF if appropriate.
8685 *
8686 * @returns Strict VBox status code.
8687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8688 * @param off Offset from the top of the stack. This is zero
8689 * except in the retf case.
8690 * @param cbMem The number of bytes to pop from the stack.
8691 * @param ppvMem Where to return the pointer to the stack memory.
8692 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8693 * return this because all use of this function is
8694 * to retrieve a new value and anything we return
8695 * here would be discarded.)
8696 */
8697VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8698 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8699{
8700 Assert(cbMem < UINT8_MAX);
8701
8702 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8703 RTGCPTR GCPtrTop;
8704 if (IEM_IS_64BIT_CODE(pVCpu))
8705 GCPtrTop = uCurNewRsp;
8706 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8707 GCPtrTop = (uint32_t)uCurNewRsp;
8708 else
8709 GCPtrTop = (uint16_t)uCurNewRsp;
8710
8711 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8712 0 /* checked in iemMemStackPopBeginSpecial */);
8713}
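
/*
 * Sketch (assumption only, not part of the build) of the inline helper the
 * todo above alludes to; it would simply mirror the mode checks in
 * iemMemStackPopContinueSpecial.
 */
#if 0
DECLINLINE(RTGCPTR) iemExampleStackPtrToGCPtr(PVMCPUCC pVCpu, uint64_t uRsp)
{
    if (IEM_IS_64BIT_CODE(pVCpu))
        return uRsp;                                    /* flat 64-bit stack */
    if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
        return (uint32_t)uRsp;                          /* 32-bit stack segment */
    return (uint16_t)uRsp;                              /* 16-bit stack segment */
}
#endif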
8714
8715
8716/**
8717 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8718 * iemMemStackPopContinueSpecial).
8719 *
8720 * The caller will manually commit the rSP.
8721 *
8722 * @returns Strict VBox status code.
8723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8724 * @param pvMem The pointer returned by
8725 * iemMemStackPopBeginSpecial() or
8726 * iemMemStackPopContinueSpecial().
8727 */
8728VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8729{
8730 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8731}
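
/*
 * Hypothetical caller sketch (illustration only): the intended begin/done
 * sequence for a special stack pop.  The real users are the iret/retf
 * implementations; the function name and flat frame layout here are
 * simplifying assumptions.
 */
#if 0
static VBOXSTRICTRC iemExamplePopRetfFrame16(PVMCPUCC pVCpu, uint16_t *puIp, uint16_t *puCs)
{
    void const  *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 4, 1 /*cbAlign*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puIp = ((uint16_t const *)pvFrame)[0];
    *puCs = ((uint16_t const *)pvFrame)[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* committed manually, as documented above */
    return rcStrict;
}
#endif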
8732
8733
8734/**
8735 * Fetches a system table byte.
8736 *
8737 * @returns Strict VBox status code.
8738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8739 * @param pbDst Where to return the byte.
8740 * @param iSegReg The index of the segment register to use for
8741 * this access. The base and limits are checked.
8742 * @param GCPtrMem The address of the guest memory.
8743 */
8744VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8745{
8746 /* The lazy approach for now... */
8747 uint8_t const *pbSrc;
8748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8749 if (rc == VINF_SUCCESS)
8750 {
8751 *pbDst = *pbSrc;
8752 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8753 }
8754 return rc;
8755}
8756
8757
8758/**
8759 * Fetches a system table word.
8760 *
8761 * @returns Strict VBox status code.
8762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8763 * @param pu16Dst Where to return the word.
8764 * @param iSegReg The index of the segment register to use for
8765 * this access. The base and limits are checked.
8766 * @param GCPtrMem The address of the guest memory.
8767 */
8768VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8769{
8770 /* The lazy approach for now... */
8771 uint16_t const *pu16Src;
8772 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8773 if (rc == VINF_SUCCESS)
8774 {
8775 *pu16Dst = *pu16Src;
8776 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8777 }
8778 return rc;
8779}
8780
8781
8782/**
8783 * Fetches a system table dword.
8784 *
8785 * @returns Strict VBox status code.
8786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8787 * @param pu32Dst Where to return the dword.
8788 * @param iSegReg The index of the segment register to use for
8789 * this access. The base and limits are checked.
8790 * @param GCPtrMem The address of the guest memory.
8791 */
8792VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8793{
8794 /* The lazy approach for now... */
8795 uint32_t const *pu32Src;
8796 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8797 if (rc == VINF_SUCCESS)
8798 {
8799 *pu32Dst = *pu32Src;
8800 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8801 }
8802 return rc;
8803}
8804
8805
8806/**
8807 * Fetches a system table qword.
8808 *
8809 * @returns Strict VBox status code.
8810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8811 * @param pu64Dst Where to return the qword.
8812 * @param iSegReg The index of the segment register to use for
8813 * this access. The base and limits are checked.
8814 * @param GCPtrMem The address of the guest memory.
8815 */
8816VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8817{
8818 /* The lazy approach for now... */
8819 uint64_t const *pu64Src;
8820 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8821 if (rc == VINF_SUCCESS)
8822 {
8823 *pu64Dst = *pu64Src;
8824 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8825 }
8826 return rc;
8827}
8828
8829
8830/**
8831 * Fetches a descriptor table entry with caller specified error code.
8832 *
8833 * @returns Strict VBox status code.
8834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8835 * @param pDesc Where to return the descriptor table entry.
8836 * @param uSel The selector which table entry to fetch.
8837 * @param uXcpt The exception to raise on table lookup error.
8838 * @param uErrorCode The error code associated with the exception.
8839 */
8840static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8841 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8842{
8843 AssertPtr(pDesc);
8844 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8845
8846 /** @todo did the 286 require all 8 bytes to be accessible? */
8847 /*
8848 * Get the selector table base and check bounds.
8849 */
8850 RTGCPTR GCPtrBase;
8851 if (uSel & X86_SEL_LDT)
8852 {
8853 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8854 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8855 {
8856 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8857 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8858 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8859 uErrorCode, 0);
8860 }
8861
8862 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8863 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8864 }
8865 else
8866 {
8867 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8868 {
8869 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8870 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8871 uErrorCode, 0);
8872 }
8873 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8874 }
8875
8876 /*
8877 * Read the legacy descriptor and maybe the long mode extensions if
8878 * required.
8879 */
8880 VBOXSTRICTRC rcStrict;
8881 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8882 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8883 else
8884 {
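        /* On the 286 only the first three words of a descriptor are defined;
           the final word is reserved (must be zero), so it is cleared below
           rather than fetched. */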
8885 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8886 if (rcStrict == VINF_SUCCESS)
8887 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8888 if (rcStrict == VINF_SUCCESS)
8889 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8890 if (rcStrict == VINF_SUCCESS)
8891 pDesc->Legacy.au16[3] = 0;
8892 else
8893 return rcStrict;
8894 }
8895
8896 if (rcStrict == VINF_SUCCESS)
8897 {
8898 if ( !IEM_IS_LONG_MODE(pVCpu)
8899 || pDesc->Legacy.Gen.u1DescType)
8900 pDesc->Long.au64[1] = 0;
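        /* Otherwise, in long mode system descriptors are 16 bytes.  For the bounds
           check, (uSel | X86_SEL_RPL_LDT) + 8 is the offset of the last byte of the
           16-byte entry; for the fetch, (uSel | X86_SEL_RPL_LDT) + 1 equals
           (uSel & X86_SEL_MASK) + 8, i.e. the offset of the high 8 bytes. */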
8901 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8902 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8903 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8904 else
8905 {
8906 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8907 /** @todo is this the right exception? */
8908 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8909 }
8910 }
8911 return rcStrict;
8912}
8913
8914
8915/**
8916 * Fetches a descriptor table entry.
8917 *
8918 * @returns Strict VBox status code.
8919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8920 * @param pDesc Where to return the descriptor table entry.
8921 * @param uSel The selector which table entry to fetch.
8922 * @param uXcpt The exception to raise on table lookup error.
8923 */
8924VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8925{
8926 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8927}
8928
8929
8930/**
8931 * Marks the selector descriptor as accessed (only non-system descriptors).
8932 *
8933 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8934 * will therefore skip the limit checks.
8935 *
8936 * @returns Strict VBox status code.
8937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8938 * @param uSel The selector.
8939 */
8940VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8941{
8942 /*
8943 * Get the selector table base and calculate the entry address.
8944 */
8945 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8946 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8947 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8948 GCPtr += uSel & X86_SEL_MASK;
8949
8950 /*
8951 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8952     * ugly stuff to avoid this.  This will make sure it's an atomic access as
8953     * well as more or less remove any question about 8-bit or 32-bit accesses.
8954 */
8955 VBOXSTRICTRC rcStrict;
8956 uint32_t volatile *pu32;
8957 if ((GCPtr & 3) == 0)
8958 {
8959 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8960 GCPtr += 2 + 2;
8961 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8962 if (rcStrict != VINF_SUCCESS)
8963 return rcStrict;
8964         ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8965 }
8966 else
8967 {
8968 /* The misaligned GDT/LDT case, map the whole thing. */
8969 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8970 if (rcStrict != VINF_SUCCESS)
8971 return rcStrict;
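        /* The accessed bit is bit 40 of the 8-byte descriptor.  The byte
           adjustments below re-align the host pointer to 4 bytes while the bit
           index compensates for the bytes skipped (8 bits each), so every case
           ends up setting exactly bit 40 of the entry. */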
8972 switch ((uintptr_t)pu32 & 3)
8973 {
8974 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8975 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8976 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8977 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8978 }
8979 }
8980
8981 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8982}
8983
8984/** @} */
8985
8986/** @name Opcode Helpers.
8987 * @{
8988 */
8989
8990/**
8991 * Calculates the effective address of a ModR/M memory operand.
8992 *
8993 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8994 *
8995 * @return Strict VBox status code.
8996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8997 * @param bRm The ModRM byte.
8998 * @param cbImmAndRspOffset - First byte: The size of any immediate
8999 * following the effective address opcode bytes
9000 * (only for RIP relative addressing).
9001 * - Second byte: RSP displacement (for POP [ESP]).
9002 * @param pGCPtrEff Where to return the effective address.
9003 */
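/*
 * A rough illustration of the cbImmAndRspOffset packing (not an actual caller):
 *   - bits 7:0  - the number of immediate bytes following the ModR/M encoding,
 *                 so RIP relative addressing is computed from the end of the
 *                 whole instruction (e.g. 4 for 'mov dword [rip+disp32], imm32');
 *   - bits 15:8 - an extra displacement applied when the base register is xSP,
 *                 as used by POP [xSP+disp] style operands.
 */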
9004VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
9005{
9006 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9007# define SET_SS_DEF() \
9008 do \
9009 { \
9010 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9011 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9012 } while (0)
9013
9014 if (!IEM_IS_64BIT_CODE(pVCpu))
9015 {
9016/** @todo Check the effective address size crap! */
9017 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9018 {
9019 uint16_t u16EffAddr;
9020
9021 /* Handle the disp16 form with no registers first. */
9022 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9023 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9024 else
9025 {
9026                 /* Get the displacement. */
9027 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9028 {
9029 case 0: u16EffAddr = 0; break;
9030 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9031 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9032 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9033 }
9034
9035 /* Add the base and index registers to the disp. */
9036 switch (bRm & X86_MODRM_RM_MASK)
9037 {
9038 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9039 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9040 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9041 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9042 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9043 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9044 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9045 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9046 }
9047 }
9048
9049 *pGCPtrEff = u16EffAddr;
9050 }
9051 else
9052 {
9053 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9054 uint32_t u32EffAddr;
9055
9056 /* Handle the disp32 form with no registers first. */
9057 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9058 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9059 else
9060 {
9061 /* Get the register (or SIB) value. */
9062 switch ((bRm & X86_MODRM_RM_MASK))
9063 {
9064 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9065 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9066 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9067 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9068 case 4: /* SIB */
9069 {
9070 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9071
9072 /* Get the index and scale it. */
9073 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9074 {
9075 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9076 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9077 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9078 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9079 case 4: u32EffAddr = 0; /*none */ break;
9080 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9081 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9082 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9083 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9084 }
9085 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9086
9087 /* add base */
9088 switch (bSib & X86_SIB_BASE_MASK)
9089 {
9090 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9091 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9092 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9093 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9094 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9095 case 5:
9096 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9097 {
9098 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9099 SET_SS_DEF();
9100 }
9101 else
9102 {
9103 uint32_t u32Disp;
9104 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9105 u32EffAddr += u32Disp;
9106 }
9107 break;
9108 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9109 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9110 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9111 }
9112 break;
9113 }
9114 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9115 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9116 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9117 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9118 }
9119
9120 /* Get and add the displacement. */
9121 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9122 {
9123 case 0:
9124 break;
9125 case 1:
9126 {
9127 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9128 u32EffAddr += i8Disp;
9129 break;
9130 }
9131 case 2:
9132 {
9133 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9134 u32EffAddr += u32Disp;
9135 break;
9136 }
9137 default:
9138 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9139 }
9140
9141 }
9142 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9143 *pGCPtrEff = u32EffAddr;
9144 else
9145 {
9146 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9147 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9148 }
9149 }
9150 }
9151 else
9152 {
9153 uint64_t u64EffAddr;
9154
9155 /* Handle the rip+disp32 form with no registers first. */
9156 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9157 {
9158 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9159 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9160 }
9161 else
9162 {
9163 /* Get the register (or SIB) value. */
9164 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9165 {
9166 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9167 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9168 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9169 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9170 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9171 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9172 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9173 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9174 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9175 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9176 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9177 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9178 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9179 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9180 /* SIB */
9181 case 4:
9182 case 12:
9183 {
9184 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9185
9186 /* Get the index and scale it. */
9187 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9188 {
9189 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9190 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9191 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9192 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9193 case 4: u64EffAddr = 0; /*none */ break;
9194 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9195 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9196 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9197 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9198 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9199 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9200 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9201 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9202 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9203 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9204 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9205 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9206 }
9207 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9208
9209 /* add base */
9210 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9211 {
9212 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9213 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9214 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9215 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9216 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9217 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9218 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9219 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9220 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9221 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9222 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9223 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9224 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9225 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9226 /* complicated encodings */
9227 case 5:
9228 case 13:
9229 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9230 {
9231 if (!pVCpu->iem.s.uRexB)
9232 {
9233 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9234 SET_SS_DEF();
9235 }
9236 else
9237 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9238 }
9239 else
9240 {
9241 uint32_t u32Disp;
9242 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9243 u64EffAddr += (int32_t)u32Disp;
9244 }
9245 break;
9246 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9247 }
9248 break;
9249 }
9250 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9251 }
9252
9253 /* Get and add the displacement. */
9254 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9255 {
9256 case 0:
9257 break;
9258 case 1:
9259 {
9260 int8_t i8Disp;
9261 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9262 u64EffAddr += i8Disp;
9263 break;
9264 }
9265 case 2:
9266 {
9267 uint32_t u32Disp;
9268 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9269 u64EffAddr += (int32_t)u32Disp;
9270 break;
9271 }
9272 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9273 }
9274
9275 }
9276
9277 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9278 *pGCPtrEff = u64EffAddr;
9279 else
9280 {
9281 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9282 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9283 }
9284 }
9285
9286 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9287 return VINF_SUCCESS;
9288}
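/*
 * Illustrative use (in practice this is reached via IEM_MC_CALC_RM_EFF_ADDR):
 *
 *     RTGCPTR      GCPtrEff;
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... access guest memory at pVCpu->iem.s.iEffSeg : GCPtrEff ...
 */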
9289
9290
9291#ifdef IEM_WITH_SETJMP
9292/**
9293 * Calculates the effective address of a ModR/M memory operand.
9294 *
9295 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9296 *
9297 * May longjmp on internal error.
9298 *
9299 * @return The effective address.
9300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9301 * @param bRm The ModRM byte.
9302 * @param cbImmAndRspOffset - First byte: The size of any immediate
9303 * following the effective address opcode bytes
9304 * (only for RIP relative addressing).
9305 * - Second byte: RSP displacement (for POP [ESP]).
9306 */
9307RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9308{
9309 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9310# define SET_SS_DEF() \
9311 do \
9312 { \
9313 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9314 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9315 } while (0)
9316
9317 if (!IEM_IS_64BIT_CODE(pVCpu))
9318 {
9319/** @todo Check the effective address size crap! */
9320 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9321 {
9322 uint16_t u16EffAddr;
9323
9324 /* Handle the disp16 form with no registers first. */
9325 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9326 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9327 else
9328 {
9329                 /* Get the displacement. */
9330 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9331 {
9332 case 0: u16EffAddr = 0; break;
9333 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9334 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9335 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9336 }
9337
9338 /* Add the base and index registers to the disp. */
9339 switch (bRm & X86_MODRM_RM_MASK)
9340 {
9341 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9342 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9343 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9344 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9345 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9346 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9347 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9348 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9349 }
9350 }
9351
9352 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9353 return u16EffAddr;
9354 }
9355
9356 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9357 uint32_t u32EffAddr;
9358
9359 /* Handle the disp32 form with no registers first. */
9360 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9361 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9362 else
9363 {
9364 /* Get the register (or SIB) value. */
9365 switch ((bRm & X86_MODRM_RM_MASK))
9366 {
9367 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9368 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9369 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9370 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9371 case 4: /* SIB */
9372 {
9373 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9374
9375 /* Get the index and scale it. */
9376 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9377 {
9378 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9379 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9380 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9381 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9382 case 4: u32EffAddr = 0; /*none */ break;
9383 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9384 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9385 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9386 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9387 }
9388 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9389
9390 /* add base */
9391 switch (bSib & X86_SIB_BASE_MASK)
9392 {
9393 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9394 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9395 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9396 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9397 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9398 case 5:
9399 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9400 {
9401 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9402 SET_SS_DEF();
9403 }
9404 else
9405 {
9406 uint32_t u32Disp;
9407 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9408 u32EffAddr += u32Disp;
9409 }
9410 break;
9411 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9412 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9413 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9414 }
9415 break;
9416 }
9417 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9418 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9419 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9420 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9421 }
9422
9423 /* Get and add the displacement. */
9424 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9425 {
9426 case 0:
9427 break;
9428 case 1:
9429 {
9430 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9431 u32EffAddr += i8Disp;
9432 break;
9433 }
9434 case 2:
9435 {
9436 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9437 u32EffAddr += u32Disp;
9438 break;
9439 }
9440 default:
9441 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9442 }
9443 }
9444
9445 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9446 {
9447 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9448 return u32EffAddr;
9449 }
9450 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9451 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9452 return u32EffAddr & UINT16_MAX;
9453 }
9454
9455 uint64_t u64EffAddr;
9456
9457 /* Handle the rip+disp32 form with no registers first. */
9458 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9459 {
9460 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9461 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9462 }
9463 else
9464 {
9465 /* Get the register (or SIB) value. */
9466 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9467 {
9468 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9469 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9470 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9471 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9472 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9473 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9474 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9475 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9476 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9477 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9478 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9479 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9480 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9481 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9482 /* SIB */
9483 case 4:
9484 case 12:
9485 {
9486 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9487
9488 /* Get the index and scale it. */
9489 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9490 {
9491 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9492 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9493 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9494 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9495 case 4: u64EffAddr = 0; /*none */ break;
9496 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9497 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9498 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9499 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9500 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9501 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9502 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9503 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9504 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9505 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9506 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9507 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9508 }
9509 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9510
9511 /* add base */
9512 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9513 {
9514 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9515 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9516 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9517 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9518 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9519 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9520 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9521 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9522 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9523 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9524 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9525 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9526 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9527 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9528 /* complicated encodings */
9529 case 5:
9530 case 13:
9531 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9532 {
9533 if (!pVCpu->iem.s.uRexB)
9534 {
9535 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9536 SET_SS_DEF();
9537 }
9538 else
9539 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9540 }
9541 else
9542 {
9543 uint32_t u32Disp;
9544 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9545 u64EffAddr += (int32_t)u32Disp;
9546 }
9547 break;
9548 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9549 }
9550 break;
9551 }
9552 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9553 }
9554
9555 /* Get and add the displacement. */
9556 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9557 {
9558 case 0:
9559 break;
9560 case 1:
9561 {
9562 int8_t i8Disp;
9563 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9564 u64EffAddr += i8Disp;
9565 break;
9566 }
9567 case 2:
9568 {
9569 uint32_t u32Disp;
9570 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9571 u64EffAddr += (int32_t)u32Disp;
9572 break;
9573 }
9574 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9575 }
9576
9577 }
9578
9579 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9580 {
9581 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9582 return u64EffAddr;
9583 }
9584 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9585 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9586 return u64EffAddr & UINT32_MAX;
9587}
9588#endif /* IEM_WITH_SETJMP */
9589
9590
9591/**
9592 * Calculates the effective address of a ModR/M memory operand, extended version
9593 * for use in the recompilers.
9594 *
9595 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9596 *
9597 * @return Strict VBox status code.
9598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9599 * @param bRm The ModRM byte.
9600 * @param cbImmAndRspOffset - First byte: The size of any immediate
9601 * following the effective address opcode bytes
9602 * (only for RIP relative addressing).
9603 * - Second byte: RSP displacement (for POP [ESP]).
9604 * @param pGCPtrEff Where to return the effective address.
9605 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9606 * SIB byte (bits 39:32).
9607 */
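/*
 * The value returned in *puInfo packs the raw encoding details and can be
 * unpicked as follows (illustrative):
 *     uint32_t const u32Disp = (uint32_t)uInfo;        // displacement, bits 31:0
 *     uint8_t  const bSib    = (uint8_t)(uInfo >> 32); // SIB byte, bits 39:32
 * Both parts are zero when the encoding has no displacement or SIB byte, and
 * 8-bit displacements are stored sign-extended.
 */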
9608VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9609{
9610     Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9611# define SET_SS_DEF() \
9612 do \
9613 { \
9614 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9615 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9616 } while (0)
9617
9618 uint64_t uInfo;
9619 if (!IEM_IS_64BIT_CODE(pVCpu))
9620 {
9621/** @todo Check the effective address size crap! */
9622 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9623 {
9624 uint16_t u16EffAddr;
9625
9626 /* Handle the disp16 form with no registers first. */
9627 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9628 {
9629 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9630 uInfo = u16EffAddr;
9631 }
9632 else
9633 {
9634                 /* Get the displacement. */
9635 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9636 {
9637 case 0: u16EffAddr = 0; break;
9638 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9639 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9640 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9641 }
9642 uInfo = u16EffAddr;
9643
9644 /* Add the base and index registers to the disp. */
9645 switch (bRm & X86_MODRM_RM_MASK)
9646 {
9647 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9648 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9649 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9650 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9651 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9652 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9653 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9654 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9655 }
9656 }
9657
9658 *pGCPtrEff = u16EffAddr;
9659 }
9660 else
9661 {
9662 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9663 uint32_t u32EffAddr;
9664
9665 /* Handle the disp32 form with no registers first. */
9666 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9667 {
9668 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9669 uInfo = u32EffAddr;
9670 }
9671 else
9672 {
9673 /* Get the register (or SIB) value. */
9674 uInfo = 0;
9675 switch ((bRm & X86_MODRM_RM_MASK))
9676 {
9677 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9678 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9679 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9680 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9681 case 4: /* SIB */
9682 {
9683 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9684 uInfo = (uint64_t)bSib << 32;
9685
9686 /* Get the index and scale it. */
9687 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9688 {
9689 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9690 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9691 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9692 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9693 case 4: u32EffAddr = 0; /*none */ break;
9694 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9695 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9696 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9697 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9698 }
9699 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9700
9701 /* add base */
9702 switch (bSib & X86_SIB_BASE_MASK)
9703 {
9704 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9705 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9706 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9707 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9708 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9709 case 5:
9710 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9711 {
9712 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9713 SET_SS_DEF();
9714 }
9715 else
9716 {
9717 uint32_t u32Disp;
9718 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9719 u32EffAddr += u32Disp;
9720 uInfo |= u32Disp;
9721 }
9722 break;
9723 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9724 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9725 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9726 }
9727 break;
9728 }
9729 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9730 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9731 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9732 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9733 }
9734
9735 /* Get and add the displacement. */
9736 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9737 {
9738 case 0:
9739 break;
9740 case 1:
9741 {
9742 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9743 u32EffAddr += i8Disp;
9744 uInfo |= (uint32_t)(int32_t)i8Disp;
9745 break;
9746 }
9747 case 2:
9748 {
9749 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9750 u32EffAddr += u32Disp;
9751 uInfo |= (uint32_t)u32Disp;
9752 break;
9753 }
9754 default:
9755 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9756 }
9757
9758 }
9759 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9760 *pGCPtrEff = u32EffAddr;
9761 else
9762 {
9763 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9764 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9765 }
9766 }
9767 }
9768 else
9769 {
9770 uint64_t u64EffAddr;
9771
9772 /* Handle the rip+disp32 form with no registers first. */
9773 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9774 {
9775 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9776 uInfo = (uint32_t)u64EffAddr;
9777 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9778 }
9779 else
9780 {
9781 /* Get the register (or SIB) value. */
9782 uInfo = 0;
9783 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9784 {
9785 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9786 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9787 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9788 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9789 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9790 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9791 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9792 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9793 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9794 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9795 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9796 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9797 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9798 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9799 /* SIB */
9800 case 4:
9801 case 12:
9802 {
9803 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9804 uInfo = (uint64_t)bSib << 32;
9805
9806 /* Get the index and scale it. */
9807 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9808 {
9809 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9810 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9811 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9812 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9813 case 4: u64EffAddr = 0; /*none */ break;
9814 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9815 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9816 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9817 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9818 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9819 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9820 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9821 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9822 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9823 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9824 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9825 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9826 }
9827 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9828
9829 /* add base */
9830 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9831 {
9832 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9833 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9834 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9835 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9836 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9837 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9838 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9839 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9840 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9841 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9842 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9843 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9844 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9845 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9846 /* complicated encodings */
9847 case 5:
9848 case 13:
9849 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9850 {
9851 if (!pVCpu->iem.s.uRexB)
9852 {
9853 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9854 SET_SS_DEF();
9855 }
9856 else
9857 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9858 }
9859 else
9860 {
9861 uint32_t u32Disp;
9862 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9863 u64EffAddr += (int32_t)u32Disp;
9864 uInfo |= u32Disp;
9865 }
9866 break;
9867 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9868 }
9869 break;
9870 }
9871 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9872 }
9873
9874 /* Get and add the displacement. */
9875 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9876 {
9877 case 0:
9878 break;
9879 case 1:
9880 {
9881 int8_t i8Disp;
9882 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9883 u64EffAddr += i8Disp;
9884 uInfo |= (uint32_t)(int32_t)i8Disp;
9885 break;
9886 }
9887 case 2:
9888 {
9889 uint32_t u32Disp;
9890 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9891 u64EffAddr += (int32_t)u32Disp;
9892 uInfo |= u32Disp;
9893 break;
9894 }
9895 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9896 }
9897
9898 }
9899
9900 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9901 *pGCPtrEff = u64EffAddr;
9902 else
9903 {
9904 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9905 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9906 }
9907 }
9908 *puInfo = uInfo;
9909
9910 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9911 return VINF_SUCCESS;
9912}
9913
9914/** @} */
9915
9916
9917#ifdef LOG_ENABLED
9918/**
9919 * Logs the current instruction.
9920 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9921 * @param fSameCtx Set if we have the same context information as the VMM,
9922 * clear if we may have already executed an instruction in
9923 * our debug context. When clear, we assume IEMCPU holds
9924 * valid CPU mode info.
9925 *
9926 * The @a fSameCtx parameter is now misleading and obsolete.
9927 * @param pszFunction The IEM function doing the execution.
9928 */
9929static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9930{
9931# ifdef IN_RING3
9932 if (LogIs2Enabled())
9933 {
9934 char szInstr[256];
9935 uint32_t cbInstr = 0;
9936 if (fSameCtx)
9937 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9938 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9939 szInstr, sizeof(szInstr), &cbInstr);
9940 else
9941 {
9942 uint32_t fFlags = 0;
9943 switch (IEM_GET_CPU_MODE(pVCpu))
9944 {
9945 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9946 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9947 case IEMMODE_16BIT:
9948 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9949 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9950 else
9951 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9952 break;
9953 }
9954 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9955 szInstr, sizeof(szInstr), &cbInstr);
9956 }
9957
9958 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9959 Log2(("**** %s fExec=%x\n"
9960 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9961 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9962 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9963 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9964 " %s\n"
9965 , pszFunction, pVCpu->iem.s.fExec,
9966 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9967 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9968 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9969 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9970 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9971 szInstr));
9972
9973 if (LogIs3Enabled())
9974 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9975 }
9976 else
9977# endif
9978 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9979 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9980 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9981}
9982#endif /* LOG_ENABLED */
9983
9984
9985#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9986/**
9987 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9988 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9989 *
9990 * @returns Modified rcStrict.
9991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9992 * @param rcStrict The instruction execution status.
9993 */
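/*
 * Rough priority summary of the checks below (highest first): APIC-write
 * emulation, monitor trap flag (MTF), VMX-preemption timer, NMI-window,
 * interrupt-window.
 */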
9994static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9995{
9996 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9997 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9998 {
9999 /* VMX preemption timer takes priority over NMI-window exits. */
10000 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
10001 {
10002 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
10003 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
10004 }
10005 /*
10006 * Check remaining intercepts.
10007 *
10008 * NMI-window and Interrupt-window VM-exits.
10009 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
10010 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
10011 *
10012 * See Intel spec. 26.7.6 "NMI-Window Exiting".
10013 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
10014 */
10015 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
10016 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10017 && !TRPMHasTrap(pVCpu))
10018 {
10019 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
10020 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
10021 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
10022 {
10023 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
10024 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
10025 }
10026 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
10027 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
10028 {
10029 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
10030 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
10031 }
10032 }
10033 }
10034 /* TPR-below threshold/APIC write has the highest priority. */
10035 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
10036 {
10037 rcStrict = iemVmxApicWriteEmulation(pVCpu);
10038 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10039 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
10040 }
10041 /* MTF takes priority over VMX-preemption timer. */
10042 else
10043 {
10044 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
10045 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10046 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
10047 }
10048 return rcStrict;
10049}
10050#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10051
10052
10053/**
10054 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10055 * IEMExecOneWithPrefetchedByPC.
10056 *
10057 * Similar code is found in IEMExecLots.
10058 *
10059 * @return Strict VBox status code.
10060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10061 * @param fExecuteInhibit If set, execute the instruction following CLI,
10062 * POP SS and MOV SS,GR.
10063 * @param pszFunction The calling function name.
10064 */
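/*
 * Rough flow: decode the first opcode byte, dispatch via the one-byte opcode
 * map, roll back any active memory mappings on failure, and, when
 * fExecuteInhibit is set and an interrupt shadow is active, decode and execute
 * one more instruction so the inhibited instruction is handled here as well.
 */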
10065DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
10066{
10067 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10068 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10069 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10070 RT_NOREF_PV(pszFunction);
10071
10072#ifdef IEM_WITH_SETJMP
10073 VBOXSTRICTRC rcStrict;
10074 IEM_TRY_SETJMP(pVCpu, rcStrict)
10075 {
10076 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10077 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10078 }
10079 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10080 {
10081 pVCpu->iem.s.cLongJumps++;
10082 }
10083 IEM_CATCH_LONGJMP_END(pVCpu);
10084#else
10085 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10086 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10087#endif
10088 if (rcStrict == VINF_SUCCESS)
10089 pVCpu->iem.s.cInstructions++;
10090 if (pVCpu->iem.s.cActiveMappings > 0)
10091 {
10092 Assert(rcStrict != VINF_SUCCESS);
10093 iemMemRollback(pVCpu);
10094 }
10095 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10096 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10097 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10098
10099//#ifdef DEBUG
10100// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
10101//#endif
10102
10103#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10104 /*
10105 * Perform any VMX nested-guest instruction boundary actions.
10106 *
10107 * If any of these causes a VM-exit, we must skip executing the next
10108 * instruction (would run into stale page tables). A VM-exit makes sure
10109      * there is no interrupt-inhibition, so that should ensure we don't try
10110      * to execute the next instruction.  Clearing fExecuteInhibit is
10111 * problematic because of the setjmp/longjmp clobbering above.
10112 */
10113 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10114 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
10115 || rcStrict != VINF_SUCCESS)
10116 { /* likely */ }
10117 else
10118 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10119#endif
10120
10121 /* Execute the next instruction as well if a cli, pop ss or
10122 mov ss, Gr has just completed successfully. */
10123 if ( fExecuteInhibit
10124 && rcStrict == VINF_SUCCESS
10125 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10126 {
10127 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10128 if (rcStrict == VINF_SUCCESS)
10129 {
10130#ifdef LOG_ENABLED
10131 iemLogCurInstr(pVCpu, false, pszFunction);
10132#endif
10133#ifdef IEM_WITH_SETJMP
10134 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10135 {
10136 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10137 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10138 }
10139 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10140 {
10141 pVCpu->iem.s.cLongJumps++;
10142 }
10143 IEM_CATCH_LONGJMP_END(pVCpu);
10144#else
10145 IEM_OPCODE_GET_FIRST_U8(&b);
10146 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10147#endif
10148 if (rcStrict == VINF_SUCCESS)
10149 {
10150 pVCpu->iem.s.cInstructions++;
10151#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10152 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10153 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10154 { /* likely */ }
10155 else
10156 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10157#endif
10158 }
10159 if (pVCpu->iem.s.cActiveMappings > 0)
10160 {
10161 Assert(rcStrict != VINF_SUCCESS);
10162 iemMemRollback(pVCpu);
10163 }
10164 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10165 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10166 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10167 }
10168 else if (pVCpu->iem.s.cActiveMappings > 0)
10169 iemMemRollback(pVCpu);
10170 /** @todo drop this after we bake this change into RIP advancing. */
10171 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10172 }
10173
10174 /*
10175 * Return value fiddling, statistics and sanity assertions.
10176 */
10177 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10178
10179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10181 return rcStrict;
10182}
10183
10184
10185/**
10186 * Execute one instruction.
10187 *
10188 * @return Strict VBox status code.
10189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10190 */
10191VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10192{
10193     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10194#ifdef LOG_ENABLED
10195 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10196#endif
10197
10198 /*
10199 * Do the decoding and emulation.
10200 */
10201 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10202 if (rcStrict == VINF_SUCCESS)
10203 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10204 else if (pVCpu->iem.s.cActiveMappings > 0)
10205 iemMemRollback(pVCpu);
10206
10207 if (rcStrict != VINF_SUCCESS)
10208 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10209 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10210 return rcStrict;
10211}
10212
10213
10214VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10215{
10216 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10217 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10218 if (rcStrict == VINF_SUCCESS)
10219 {
10220 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10221 if (pcbWritten)
10222 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10223 }
10224 else if (pVCpu->iem.s.cActiveMappings > 0)
10225 iemMemRollback(pVCpu);
10226
10227 return rcStrict;
10228}
10229
10230
10231VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10232 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10233{
10234 VBOXSTRICTRC rcStrict;
10235 if ( cbOpcodeBytes
10236 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10237 {
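        /* The caller supplied opcode bytes are installed directly as the
           instruction buffer / opcode cache, skipping the guest memory
           prefetch; this path is only taken while RIP equals OpcodeBytesPC. */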
10238 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10239#ifdef IEM_WITH_CODE_TLB
10240 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10241 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10242 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10243 pVCpu->iem.s.offCurInstrStart = 0;
10244 pVCpu->iem.s.offInstrNextByte = 0;
10245 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10246#else
10247 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10248 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10249#endif
10250 rcStrict = VINF_SUCCESS;
10251 }
10252 else
10253 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10254 if (rcStrict == VINF_SUCCESS)
10255 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10256 else if (pVCpu->iem.s.cActiveMappings > 0)
10257 iemMemRollback(pVCpu);
10258
10259 return rcStrict;
10260}
10261
10262
10263VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10264{
10265 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10266 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10267 if (rcStrict == VINF_SUCCESS)
10268 {
10269 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10270 if (pcbWritten)
10271 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10272 }
10273 else if (pVCpu->iem.s.cActiveMappings > 0)
10274 iemMemRollback(pVCpu);
10275
10276 return rcStrict;
10277}
10278
10279
10280VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10281 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10282{
10283 VBOXSTRICTRC rcStrict;
10284 if ( cbOpcodeBytes
10285 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10286 {
10287 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10288#ifdef IEM_WITH_CODE_TLB
10289 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10290 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10291 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10292 pVCpu->iem.s.offCurInstrStart = 0;
10293 pVCpu->iem.s.offInstrNextByte = 0;
10294 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10295#else
10296 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10297 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10298#endif
10299 rcStrict = VINF_SUCCESS;
10300 }
10301 else
10302 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10303 if (rcStrict == VINF_SUCCESS)
10304 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10305 else if (pVCpu->iem.s.cActiveMappings > 0)
10306 iemMemRollback(pVCpu);
10307
10308 return rcStrict;
10309}
10310
10311
10312/**
10313 * For handling split cacheline lock operations when the host has split-lock
10314 * detection enabled.
10315 *
10316 * This will cause the interpreter to disregard the lock prefix and implicit
10317 * locking (xchg).
10318 *
10319 * @returns Strict VBox status code.
10320 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10321 */
10322VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10323{
10324 /*
10325 * Do the decoding and emulation.
10326 */
10327 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10328 if (rcStrict == VINF_SUCCESS)
10329 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10330 else if (pVCpu->iem.s.cActiveMappings > 0)
10331 iemMemRollback(pVCpu);
10332
10333 if (rcStrict != VINF_SUCCESS)
10334 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10335 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10336 return rcStrict;
10337}
10338
10339
10340/**
10341 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10342 * inject a pending TRPM trap.
10343 */
10344VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10345{
10346 Assert(TRPMHasTrap(pVCpu));
10347
10348 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10349 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10350 {
10351 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10352#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10353 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10354 if (fIntrEnabled)
10355 {
10356 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10357 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10358 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10359 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10360 else
10361 {
10362 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10363 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10364 }
10365 }
10366#else
10367 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10368#endif
10369 if (fIntrEnabled)
10370 {
10371 uint8_t u8TrapNo;
10372 TRPMEVENT enmType;
10373 uint32_t uErrCode;
10374 RTGCPTR uCr2;
10375 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10376 AssertRC(rc2);
10377 Assert(enmType == TRPM_HARDWARE_INT);
10378 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10379
10380 TRPMResetTrap(pVCpu);
10381
10382#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10383 /* Injecting an event may cause a VM-exit. */
10384 if ( rcStrict != VINF_SUCCESS
10385 && rcStrict != VINF_IEM_RAISED_XCPT)
10386 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10387#else
10388 NOREF(rcStrict);
10389#endif
10390 }
10391 }
10392
10393 return VINF_SUCCESS;
10394}
10395
10396
10397VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10398{
10399 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10400 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10401 Assert(cMaxInstructions > 0);
10402
10403 /*
10404 * See if there is an interrupt pending in TRPM, inject it if we can.
10405 */
10406 /** @todo What if we are injecting an exception and not an interrupt? Is that
10407 * possible here? For now we assert it is indeed only an interrupt. */
10408 if (!TRPMHasTrap(pVCpu))
10409 { /* likely */ }
10410 else
10411 {
10412 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10413 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10414 { /*likely */ }
10415 else
10416 return rcStrict;
10417 }
10418
10419 /*
10420 * Initial decoder init w/ prefetch, then setup setjmp.
10421 */
10422 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10423 if (rcStrict == VINF_SUCCESS)
10424 {
10425#ifdef IEM_WITH_SETJMP
10426 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10427 IEM_TRY_SETJMP(pVCpu, rcStrict)
10428#endif
10429 {
10430 /*
10431             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions.
10432 */
10433 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10434 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10435 for (;;)
10436 {
10437 /*
10438 * Log the state.
10439 */
10440#ifdef LOG_ENABLED
10441 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10442#endif
10443
10444 /*
10445 * Do the decoding and emulation.
10446 */
10447 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10448 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10449#ifdef VBOX_STRICT
10450 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10451#endif
10452 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10453 {
10454 Assert(pVCpu->iem.s.cActiveMappings == 0);
10455 pVCpu->iem.s.cInstructions++;
10456
10457#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10458 /* Perform any VMX nested-guest instruction boundary actions. */
10459 uint64_t fCpu = pVCpu->fLocalForcedActions;
10460 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10461 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10462 { /* likely */ }
10463 else
10464 {
10465 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10467 fCpu = pVCpu->fLocalForcedActions;
10468 else
10469 {
10470 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10471 break;
10472 }
10473 }
10474#endif
10475 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10476 {
10477#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10478 uint64_t fCpu = pVCpu->fLocalForcedActions;
10479#endif
10480 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10481 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10482 | VMCPU_FF_TLB_FLUSH
10483 | VMCPU_FF_UNHALT );
10484
10485 if (RT_LIKELY( ( !fCpu
10486 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10487 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10488 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10489 {
10490 if (--cMaxInstructionsGccStupidity > 0)
10491 {
10492                            /* Poll timers every now and then according to the caller's specs. */
10493 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10494 || !TMTimerPollBool(pVM, pVCpu))
10495 {
10496 Assert(pVCpu->iem.s.cActiveMappings == 0);
10497 iemReInitDecoder(pVCpu);
10498 continue;
10499 }
10500 }
10501 }
10502 }
10503 Assert(pVCpu->iem.s.cActiveMappings == 0);
10504 }
10505 else if (pVCpu->iem.s.cActiveMappings > 0)
10506 iemMemRollback(pVCpu);
10507 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10508 break;
10509 }
10510 }
10511#ifdef IEM_WITH_SETJMP
10512 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10513 {
10514 if (pVCpu->iem.s.cActiveMappings > 0)
10515 iemMemRollback(pVCpu);
10516# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10517 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10518# endif
10519 pVCpu->iem.s.cLongJumps++;
10520 }
10521 IEM_CATCH_LONGJMP_END(pVCpu);
10522#endif
10523
10524 /*
10525 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10526 */
10527 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10528 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10529 }
10530 else
10531 {
10532 if (pVCpu->iem.s.cActiveMappings > 0)
10533 iemMemRollback(pVCpu);
10534
10535#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10536 /*
10537 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10538 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10539 */
10540 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10541#endif
10542 }
10543
10544 /*
10545 * Maybe re-enter raw-mode and log.
10546 */
10547 if (rcStrict != VINF_SUCCESS)
10548 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10549 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10550 if (pcInstructions)
10551 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10552 return rcStrict;
10553}
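
/*
 * Editorial usage sketch (not part of the original source): a minimal
 * EM-style call into IEMExecLots.  The assertion at the top of the function
 * requires cPollRate + 1 to be a power of two, so the 511 below means "poll
 * the timers roughly every 512 instructions"; both numbers are illustrative.
 *
 *     uint32_t cInstructions = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 2048, 511, &cInstructions);
 *     LogFlow(("IEMExecLots: executed %u instructions, rc=%Rrc\n",
 *              cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */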
10554
10555
10556/**
10557 * Interface used by EMExecuteExec, does exit statistics and limits.
10558 *
10559 * @returns Strict VBox status code.
10560 * @param pVCpu The cross context virtual CPU structure.
10561 * @param fWillExit To be defined.
10562 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10563 * @param cMaxInstructions Maximum number of instructions to execute.
10564 * @param cMaxInstructionsWithoutExits
10565 * The max number of instructions without exits.
10566 * @param pStats Where to return statistics.
10567 */
10568VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10569 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10570{
10571 NOREF(fWillExit); /** @todo define flexible exit crits */
10572
10573 /*
10574 * Initialize return stats.
10575 */
10576 pStats->cInstructions = 0;
10577 pStats->cExits = 0;
10578 pStats->cMaxExitDistance = 0;
10579 pStats->cReserved = 0;
10580
10581 /*
10582 * Initial decoder init w/ prefetch, then setup setjmp.
10583 */
10584 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10585 if (rcStrict == VINF_SUCCESS)
10586 {
10587#ifdef IEM_WITH_SETJMP
10588 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10589 IEM_TRY_SETJMP(pVCpu, rcStrict)
10590#endif
10591 {
10592#ifdef IN_RING0
10593 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10594#endif
10595 uint32_t cInstructionSinceLastExit = 0;
10596
10597 /*
10598             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions.
10599 */
10600 PVM pVM = pVCpu->CTX_SUFF(pVM);
10601 for (;;)
10602 {
10603 /*
10604 * Log the state.
10605 */
10606#ifdef LOG_ENABLED
10607 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10608#endif
10609
10610 /*
10611 * Do the decoding and emulation.
10612 */
10613 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10614
10615 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10616 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10617
10618 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10619 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10620 {
10621 pStats->cExits += 1;
10622 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10623 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10624 cInstructionSinceLastExit = 0;
10625 }
10626
10627 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10628 {
10629 Assert(pVCpu->iem.s.cActiveMappings == 0);
10630 pVCpu->iem.s.cInstructions++;
10631 pStats->cInstructions++;
10632 cInstructionSinceLastExit++;
10633
10634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10635 /* Perform any VMX nested-guest instruction boundary actions. */
10636 uint64_t fCpu = pVCpu->fLocalForcedActions;
10637 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10638 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10639 { /* likely */ }
10640 else
10641 {
10642 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10643 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10644 fCpu = pVCpu->fLocalForcedActions;
10645 else
10646 {
10647 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10648 break;
10649 }
10650 }
10651#endif
10652 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10653 {
10654#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10655 uint64_t fCpu = pVCpu->fLocalForcedActions;
10656#endif
10657 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10658 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10659 | VMCPU_FF_TLB_FLUSH
10660 | VMCPU_FF_UNHALT );
10661 if (RT_LIKELY( ( ( !fCpu
10662 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10663 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10664 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10665 || pStats->cInstructions < cMinInstructions))
10666 {
10667 if (pStats->cInstructions < cMaxInstructions)
10668 {
10669 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10670 {
10671#ifdef IN_RING0
10672 if ( !fCheckPreemptionPending
10673 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10674#endif
10675 {
10676 Assert(pVCpu->iem.s.cActiveMappings == 0);
10677 iemReInitDecoder(pVCpu);
10678 continue;
10679 }
10680#ifdef IN_RING0
10681 rcStrict = VINF_EM_RAW_INTERRUPT;
10682 break;
10683#endif
10684 }
10685 }
10686 }
10687 Assert(!(fCpu & VMCPU_FF_IEM));
10688 }
10689 Assert(pVCpu->iem.s.cActiveMappings == 0);
10690 }
10691 else if (pVCpu->iem.s.cActiveMappings > 0)
10692 iemMemRollback(pVCpu);
10693 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10694 break;
10695 }
10696 }
10697#ifdef IEM_WITH_SETJMP
10698 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10699 {
10700 if (pVCpu->iem.s.cActiveMappings > 0)
10701 iemMemRollback(pVCpu);
10702 pVCpu->iem.s.cLongJumps++;
10703 }
10704 IEM_CATCH_LONGJMP_END(pVCpu);
10705#endif
10706
10707 /*
10708 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10709 */
10710 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10711 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10712 }
10713 else
10714 {
10715 if (pVCpu->iem.s.cActiveMappings > 0)
10716 iemMemRollback(pVCpu);
10717
10718#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10719 /*
10720 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10721 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10722 */
10723 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10724#endif
10725 }
10726
10727 /*
10728 * Maybe re-enter raw-mode and log.
10729 */
10730 if (rcStrict != VINF_SUCCESS)
10731 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10732 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10733 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10734 return rcStrict;
10735}
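
/*
 * Editorial usage sketch (not part of the original source): how a caller such
 * as EM's exit-history optimizer might drive IEMExecForExits and inspect the
 * returned statistics.  fWillExit is passed as zero since the parameter is
 * still "to be defined" above; the instruction limits are illustrative.
 *
 *     IEMEXECFOREXITSTATS Stats;
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 2048, &Stats);
 *     LogFlow(("IEMExecForExits: %u instructions, %u exits, max distance %u, rc=%Rrc\n",
 *              Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance,
 *              VBOXSTRICTRC_VAL(rcStrict)));
 */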
10736
10737
10738/**
10739 * Injects a trap, fault, abort, software interrupt or external interrupt.
10740 *
10741 * The parameter list matches TRPMQueryTrapAll pretty closely.
10742 *
10743 * @returns Strict VBox status code.
10744 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10745 * @param u8TrapNo The trap number.
10746 * @param enmType What type is it (trap/fault/abort), software
10747 * interrupt or hardware interrupt.
10748 * @param uErrCode The error code if applicable.
10749 * @param uCr2 The CR2 value if applicable.
10750 * @param cbInstr The instruction length (only relevant for
10751 * software interrupts).
10752 */
10753VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10754 uint8_t cbInstr)
10755{
10756 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10757#ifdef DBGFTRACE_ENABLED
10758 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10759 u8TrapNo, enmType, uErrCode, uCr2);
10760#endif
10761
10762 uint32_t fFlags;
10763 switch (enmType)
10764 {
10765 case TRPM_HARDWARE_INT:
10766 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10767 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10768 uErrCode = uCr2 = 0;
10769 break;
10770
10771 case TRPM_SOFTWARE_INT:
10772 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10773 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10774 uErrCode = uCr2 = 0;
10775 break;
10776
10777 case TRPM_TRAP:
10778 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10779 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10780 if (u8TrapNo == X86_XCPT_PF)
10781 fFlags |= IEM_XCPT_FLAGS_CR2;
10782 switch (u8TrapNo)
10783 {
10784 case X86_XCPT_DF:
10785 case X86_XCPT_TS:
10786 case X86_XCPT_NP:
10787 case X86_XCPT_SS:
10788 case X86_XCPT_PF:
10789 case X86_XCPT_AC:
10790 case X86_XCPT_GP:
10791 fFlags |= IEM_XCPT_FLAGS_ERR;
10792 break;
10793 }
10794 break;
10795
10796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10797 }
10798
10799 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10800
10801 if (pVCpu->iem.s.cActiveMappings > 0)
10802 iemMemRollback(pVCpu);
10803
10804 return rcStrict;
10805}
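
/*
 * Editorial usage sketch (not part of the original source): injecting a page
 * fault versus an external hardware interrupt.  For TRPM_HARDWARE_INT the
 * error code, CR2 and cbInstr arguments are ignored, as the switch above
 * shows.  GCPtrFault and the vector 0x20 are hypothetical example values.
 *
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                           X86_TRAP_PF_RW, GCPtrFault, 0);
 *     ...
 *     rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 */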
10806
10807
10808/**
10809 * Injects the active TRPM event.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure.
10813 */
10814VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10815{
10816#ifndef IEM_IMPLEMENTS_TASKSWITCH
10817 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10818#else
10819 uint8_t u8TrapNo;
10820 TRPMEVENT enmType;
10821 uint32_t uErrCode;
10822 RTGCUINTPTR uCr2;
10823 uint8_t cbInstr;
10824 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10825 if (RT_FAILURE(rc))
10826 return rc;
10827
10828 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10829 * ICEBP \#DB injection as a special case. */
10830 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10831#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10832 if (rcStrict == VINF_SVM_VMEXIT)
10833 rcStrict = VINF_SUCCESS;
10834#endif
10835#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10836 if (rcStrict == VINF_VMX_VMEXIT)
10837 rcStrict = VINF_SUCCESS;
10838#endif
10839 /** @todo Are there any other codes that imply the event was successfully
10840 * delivered to the guest? See @bugref{6607}. */
10841 if ( rcStrict == VINF_SUCCESS
10842 || rcStrict == VINF_IEM_RAISED_XCPT)
10843 TRPMResetTrap(pVCpu);
10844
10845 return rcStrict;
10846#endif
10847}
10848
10849
10850VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10851{
10852 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10853 return VERR_NOT_IMPLEMENTED;
10854}
10855
10856
10857VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10858{
10859 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10860 return VERR_NOT_IMPLEMENTED;
10861}
10862
10863
10864/**
10865 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10866 *
10867 * This API ASSUMES that the caller has already verified that the guest code is
10868 * allowed to access the I/O port. (The I/O port is in the DX register in the
10869 * guest state.)
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure.
10873 * @param cbValue The size of the I/O port access (1, 2, or 4).
10874 * @param enmAddrMode The addressing mode.
10875 * @param fRepPrefix Indicates whether a repeat prefix is used
10876 * (doesn't matter which for this instruction).
10877 * @param cbInstr The instruction length in bytes.
10878 * @param   iEffSeg         The effective segment register.
10879 * @param fIoChecked Whether the access to the I/O port has been
10880 * checked or not. It's typically checked in the
10881 * HM scenario.
10882 */
10883VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10884 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10885{
10886 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10887 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10888
10889 /*
10890 * State init.
10891 */
10892 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10893
10894 /*
10895 * Switch orgy for getting to the right handler.
10896 */
10897 VBOXSTRICTRC rcStrict;
10898 if (fRepPrefix)
10899 {
10900 switch (enmAddrMode)
10901 {
10902 case IEMMODE_16BIT:
10903 switch (cbValue)
10904 {
10905 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10906 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10907 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10908 default:
10909 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10910 }
10911 break;
10912
10913 case IEMMODE_32BIT:
10914 switch (cbValue)
10915 {
10916 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10917 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10918 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10919 default:
10920 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10921 }
10922 break;
10923
10924 case IEMMODE_64BIT:
10925 switch (cbValue)
10926 {
10927 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10928 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10929 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10930 default:
10931 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10932 }
10933 break;
10934
10935 default:
10936 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10937 }
10938 }
10939 else
10940 {
10941 switch (enmAddrMode)
10942 {
10943 case IEMMODE_16BIT:
10944 switch (cbValue)
10945 {
10946 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10947 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10948 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10949 default:
10950 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10951 }
10952 break;
10953
10954 case IEMMODE_32BIT:
10955 switch (cbValue)
10956 {
10957 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10958 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10959 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10960 default:
10961 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10962 }
10963 break;
10964
10965 case IEMMODE_64BIT:
10966 switch (cbValue)
10967 {
10968 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10969 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10970 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10971 default:
10972 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10973 }
10974 break;
10975
10976 default:
10977 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10978 }
10979 }
10980
10981 if (pVCpu->iem.s.cActiveMappings)
10982 iemMemRollback(pVCpu);
10983
10984 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10985}
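
/*
 * Editorial usage sketch (not part of the original source): an HM exit
 * handler handing a vetted "rep outsb" (I/O permission already checked, port
 * in DX) to the helper above.  The 32-bit address size, the 2-byte
 * instruction length and DS as the effective segment are assumptions for a
 * plain, unprefixed encoding.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                  true, 2, X86_SREG_DS, true);
 */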
10986
10987
10988/**
10989 * Interface for HM and EM for executing string I/O IN (read) instructions.
10990 *
10991 * This API ASSUMES that the caller has already verified that the guest code is
10992 * allowed to access the I/O port. (The I/O port is in the DX register in the
10993 * guest state.)
10994 *
10995 * @returns Strict VBox status code.
10996 * @param pVCpu The cross context virtual CPU structure.
10997 * @param cbValue The size of the I/O port access (1, 2, or 4).
10998 * @param enmAddrMode The addressing mode.
10999 * @param fRepPrefix Indicates whether a repeat prefix is used
11000 * (doesn't matter which for this instruction).
11001 * @param cbInstr The instruction length in bytes.
11002 * @param fIoChecked Whether the access to the I/O port has been
11003 * checked or not. It's typically checked in the
11004 * HM scenario.
11005 */
11006VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11007 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11008{
11009 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11010
11011 /*
11012 * State init.
11013 */
11014 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11015
11016 /*
11017 * Switch orgy for getting to the right handler.
11018 */
11019 VBOXSTRICTRC rcStrict;
11020 if (fRepPrefix)
11021 {
11022 switch (enmAddrMode)
11023 {
11024 case IEMMODE_16BIT:
11025 switch (cbValue)
11026 {
11027 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11028 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11029 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11030 default:
11031 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11032 }
11033 break;
11034
11035 case IEMMODE_32BIT:
11036 switch (cbValue)
11037 {
11038 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11039 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11040 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11041 default:
11042 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11043 }
11044 break;
11045
11046 case IEMMODE_64BIT:
11047 switch (cbValue)
11048 {
11049 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11050 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11051 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11052 default:
11053 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11054 }
11055 break;
11056
11057 default:
11058 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11059 }
11060 }
11061 else
11062 {
11063 switch (enmAddrMode)
11064 {
11065 case IEMMODE_16BIT:
11066 switch (cbValue)
11067 {
11068 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11069 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11070 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11071 default:
11072 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11073 }
11074 break;
11075
11076 case IEMMODE_32BIT:
11077 switch (cbValue)
11078 {
11079 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11080 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11081 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11082 default:
11083 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11084 }
11085 break;
11086
11087 case IEMMODE_64BIT:
11088 switch (cbValue)
11089 {
11090 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11091 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11092 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11093 default:
11094 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11095 }
11096 break;
11097
11098 default:
11099 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11100 }
11101 }
11102
11103 if ( pVCpu->iem.s.cActiveMappings == 0
11104 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
11105 { /* likely */ }
11106 else
11107 {
11108 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
11109 iemMemRollback(pVCpu);
11110 }
11111 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11112}
11113
11114
11115/**
11116 * Interface for rawmode to execute an OUT instruction.
11117 *
11118 * @returns Strict VBox status code.
11119 * @param pVCpu The cross context virtual CPU structure.
11120 * @param cbInstr The instruction length in bytes.
11121 * @param   u16Port     The port to write to.
11122 * @param fImm Whether the port is specified using an immediate operand or
11123 * using the implicit DX register.
11124 * @param cbReg The register size.
11125 *
11126 * @remarks In ring-0 not all of the state needs to be synced in.
11127 */
11128VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11129{
11130 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11131 Assert(cbReg <= 4 && cbReg != 3);
11132
11133 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11134 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11135 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11136 Assert(!pVCpu->iem.s.cActiveMappings);
11137 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11138}
11139
11140
11141/**
11142 * Interface for rawmode to execute an IN instruction.
11143 *
11144 * @returns Strict VBox status code.
11145 * @param pVCpu The cross context virtual CPU structure.
11146 * @param cbInstr The instruction length in bytes.
11147 * @param u16Port The port to read.
11148 * @param fImm Whether the port is specified using an immediate operand or
11149 * using the implicit DX.
11150 * @param cbReg The register size.
11151 */
11152VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11153{
11154 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11155 Assert(cbReg <= 4 && cbReg != 3);
11156
11157 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11158 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11159 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11160 Assert(!pVCpu->iem.s.cActiveMappings);
11161 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11162}
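
/*
 * Editorial usage sketch (not part of the original source): forwarding the
 * DX-relative forms "out dx, al" and "in al, dx" to the two helpers above.
 * Both encodings are a single byte, hence cbInstr = 1; the immediate-port
 * forms would pass fImm = true and cbInstr = 2.  The same call pattern is
 * used by the remaining IEMExecDecoded* wrappers below.
 *
 *     uint16_t const u16Port = (uint16_t)pVCpu->cpum.GstCtx.rdx;
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1, u16Port, false, 1);
 *     ...
 *     rcStrict = IEMExecDecodedIn(pVCpu, 1, u16Port, false, 1);
 */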
11163
11164
11165/**
11166 * Interface for HM and EM to write to a CRx register.
11167 *
11168 * @returns Strict VBox status code.
11169 * @param pVCpu The cross context virtual CPU structure.
11170 * @param cbInstr The instruction length in bytes.
11171 * @param iCrReg The control register number (destination).
11172 * @param iGReg The general purpose register number (source).
11173 *
11174 * @remarks In ring-0 not all of the state needs to be synced in.
11175 */
11176VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11177{
11178 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11179 Assert(iCrReg < 16);
11180 Assert(iGReg < 16);
11181
11182 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11183 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11184 Assert(!pVCpu->iem.s.cActiveMappings);
11185 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11186}
11187
11188
11189/**
11190 * Interface for HM and EM to read from a CRx register.
11191 *
11192 * @returns Strict VBox status code.
11193 * @param pVCpu The cross context virtual CPU structure.
11194 * @param cbInstr The instruction length in bytes.
11195 * @param iGReg The general purpose register number (destination).
11196 * @param iCrReg The control register number (source).
11197 *
11198 * @remarks In ring-0 not all of the state needs to be synced in.
11199 */
11200VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11201{
11202 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11203 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11204 | CPUMCTX_EXTRN_APIC_TPR);
11205 Assert(iCrReg < 16);
11206 Assert(iGReg < 16);
11207
11208 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11209 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11210 Assert(!pVCpu->iem.s.cActiveMappings);
11211 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11212}
11213
11214
11215/**
11216 * Interface for HM and EM to write to a DRx register.
11217 *
11218 * @returns Strict VBox status code.
11219 * @param pVCpu The cross context virtual CPU structure.
11220 * @param cbInstr The instruction length in bytes.
11221 * @param iDrReg The debug register number (destination).
11222 * @param iGReg The general purpose register number (source).
11223 *
11224 * @remarks In ring-0 not all of the state needs to be synced in.
11225 */
11226VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11227{
11228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11229 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11230 Assert(iDrReg < 8);
11231 Assert(iGReg < 16);
11232
11233 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11234 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11235 Assert(!pVCpu->iem.s.cActiveMappings);
11236 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11237}
11238
11239
11240/**
11241 * Interface for HM and EM to read from a DRx register.
11242 *
11243 * @returns Strict VBox status code.
11244 * @param pVCpu The cross context virtual CPU structure.
11245 * @param cbInstr The instruction length in bytes.
11246 * @param iGReg The general purpose register number (destination).
11247 * @param iDrReg The debug register number (source).
11248 *
11249 * @remarks In ring-0 not all of the state needs to be synced in.
11250 */
11251VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11252{
11253 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11255 Assert(iDrReg < 8);
11256 Assert(iGReg < 16);
11257
11258 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11260 Assert(!pVCpu->iem.s.cActiveMappings);
11261 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11262}
11263
11264
11265/**
11266 * Interface for HM and EM to clear the CR0[TS] bit.
11267 *
11268 * @returns Strict VBox status code.
11269 * @param pVCpu The cross context virtual CPU structure.
11270 * @param cbInstr The instruction length in bytes.
11271 *
11272 * @remarks In ring-0 not all of the state needs to be synced in.
11273 */
11274VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11275{
11276 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11277
11278 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11279 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11280 Assert(!pVCpu->iem.s.cActiveMappings);
11281 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11282}
11283
11284
11285/**
11286 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11287 *
11288 * @returns Strict VBox status code.
11289 * @param pVCpu The cross context virtual CPU structure.
11290 * @param cbInstr The instruction length in bytes.
11291 * @param uValue The value to load into CR0.
11292 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11293 * memory operand. Otherwise pass NIL_RTGCPTR.
11294 *
11295 * @remarks In ring-0 not all of the state needs to be synced in.
11296 */
11297VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11298{
11299 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11300
11301 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11302 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11303 Assert(!pVCpu->iem.s.cActiveMappings);
11304 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11305}
11306
11307
11308/**
11309 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11310 *
11311 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11312 *
11313 * @returns Strict VBox status code.
11314 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11315 * @param cbInstr The instruction length in bytes.
11316 * @remarks In ring-0 not all of the state needs to be synced in.
11317 * @thread EMT(pVCpu)
11318 */
11319VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11320{
11321 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11322
11323 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11324 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11325 Assert(!pVCpu->iem.s.cActiveMappings);
11326 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11327}
11328
11329
11330/**
11331 * Interface for HM and EM to emulate the WBINVD instruction.
11332 *
11333 * @returns Strict VBox status code.
11334 * @param pVCpu The cross context virtual CPU structure.
11335 * @param cbInstr The instruction length in bytes.
11336 *
11337 * @remarks In ring-0 not all of the state needs to be synced in.
11338 */
11339VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11340{
11341 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11342
11343 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11345 Assert(!pVCpu->iem.s.cActiveMappings);
11346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11347}
11348
11349
11350/**
11351 * Interface for HM and EM to emulate the INVD instruction.
11352 *
11353 * @returns Strict VBox status code.
11354 * @param pVCpu The cross context virtual CPU structure.
11355 * @param cbInstr The instruction length in bytes.
11356 *
11357 * @remarks In ring-0 not all of the state needs to be synced in.
11358 */
11359VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11360{
11361 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11362
11363 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11365 Assert(!pVCpu->iem.s.cActiveMappings);
11366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11367}
11368
11369
11370/**
11371 * Interface for HM and EM to emulate the INVLPG instruction.
11372 *
11373 * @returns Strict VBox status code.
11374 * @retval VINF_PGM_SYNC_CR3
11375 *
11376 * @param pVCpu The cross context virtual CPU structure.
11377 * @param cbInstr The instruction length in bytes.
11378 * @param GCPtrPage The effective address of the page to invalidate.
11379 *
11380 * @remarks In ring-0 not all of the state needs to be synced in.
11381 */
11382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11383{
11384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11385
11386 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11387 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11388 Assert(!pVCpu->iem.s.cActiveMappings);
11389 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11390}
11391
11392
11393/**
11394 * Interface for HM and EM to emulate the INVPCID instruction.
11395 *
11396 * @returns Strict VBox status code.
11397 * @retval VINF_PGM_SYNC_CR3
11398 *
11399 * @param pVCpu The cross context virtual CPU structure.
11400 * @param cbInstr The instruction length in bytes.
11401 * @param iEffSeg The effective segment register.
11402 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11403 * @param uType The invalidation type.
11404 *
11405 * @remarks In ring-0 not all of the state needs to be synced in.
11406 */
11407VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11408 uint64_t uType)
11409{
11410 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11411
11412 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11413 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11414 Assert(!pVCpu->iem.s.cActiveMappings);
11415 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11416}
11417
11418
11419/**
11420 * Interface for HM and EM to emulate the CPUID instruction.
11421 *
11422 * @returns Strict VBox status code.
11423 *
11424 * @param pVCpu The cross context virtual CPU structure.
11425 * @param cbInstr The instruction length in bytes.
11426 *
11427 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
11428 */
11429VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11430{
11431 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11432 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11433
11434 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11435 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11436 Assert(!pVCpu->iem.s.cActiveMappings);
11437 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11438}
11439
11440
11441/**
11442 * Interface for HM and EM to emulate the RDPMC instruction.
11443 *
11444 * @returns Strict VBox status code.
11445 *
11446 * @param pVCpu The cross context virtual CPU structure.
11447 * @param cbInstr The instruction length in bytes.
11448 *
11449 * @remarks Not all of the state needs to be synced in.
11450 */
11451VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11452{
11453 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11454 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11455
11456 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11457 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11458 Assert(!pVCpu->iem.s.cActiveMappings);
11459 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11460}
11461
11462
11463/**
11464 * Interface for HM and EM to emulate the RDTSC instruction.
11465 *
11466 * @returns Strict VBox status code.
11467 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11468 *
11469 * @param pVCpu The cross context virtual CPU structure.
11470 * @param cbInstr The instruction length in bytes.
11471 *
11472 * @remarks Not all of the state needs to be synced in.
11473 */
11474VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11475{
11476 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11477 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11478
11479 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11480 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11481 Assert(!pVCpu->iem.s.cActiveMappings);
11482 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11483}
11484
11485
11486/**
11487 * Interface for HM and EM to emulate the RDTSCP instruction.
11488 *
11489 * @returns Strict VBox status code.
11490 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11491 *
11492 * @param pVCpu The cross context virtual CPU structure.
11493 * @param cbInstr The instruction length in bytes.
11494 *
11495 * @remarks Not all of the state needs to be synced in. Recommended
11496 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11497 */
11498VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11499{
11500 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11501 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11502
11503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11505 Assert(!pVCpu->iem.s.cActiveMappings);
11506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11507}
11508
11509
11510/**
11511 * Interface for HM and EM to emulate the RDMSR instruction.
11512 *
11513 * @returns Strict VBox status code.
11514 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11515 *
11516 * @param pVCpu The cross context virtual CPU structure.
11517 * @param cbInstr The instruction length in bytes.
11518 *
11519 * @remarks Not all of the state needs to be synced in. Requires RCX and
11520 * (currently) all MSRs.
11521 */
11522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11523{
11524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11525 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11526
11527 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11528 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11529 Assert(!pVCpu->iem.s.cActiveMappings);
11530 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11531}
11532
11533
11534/**
11535 * Interface for HM and EM to emulate the WRMSR instruction.
11536 *
11537 * @returns Strict VBox status code.
11538 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11539 *
11540 * @param pVCpu The cross context virtual CPU structure.
11541 * @param cbInstr The instruction length in bytes.
11542 *
11543 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11544 * and (currently) all MSRs.
11545 */
11546VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11547{
11548 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11549 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11550 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11551
11552 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11553 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11554 Assert(!pVCpu->iem.s.cActiveMappings);
11555 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11556}
11557
11558
11559/**
11560 * Interface for HM and EM to emulate the MONITOR instruction.
11561 *
11562 * @returns Strict VBox status code.
11563 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11564 *
11565 * @param pVCpu The cross context virtual CPU structure.
11566 * @param cbInstr The instruction length in bytes.
11567 *
11568 * @remarks Not all of the state needs to be synced in.
11569 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11570 * are used.
11571 */
11572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11573{
11574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11575 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11576
11577 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11578 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11579 Assert(!pVCpu->iem.s.cActiveMappings);
11580 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11581}
11582
11583
11584/**
11585 * Interface for HM and EM to emulate the MWAIT instruction.
11586 *
11587 * @returns Strict VBox status code.
11588 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11589 *
11590 * @param pVCpu The cross context virtual CPU structure.
11591 * @param cbInstr The instruction length in bytes.
11592 *
11593 * @remarks Not all of the state needs to be synced in.
11594 */
11595VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11596{
11597 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11598 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11599
11600 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11601 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11602 Assert(!pVCpu->iem.s.cActiveMappings);
11603 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11604}
11605
11606
11607/**
11608 * Interface for HM and EM to emulate the HLT instruction.
11609 *
11610 * @returns Strict VBox status code.
11611 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11612 *
11613 * @param pVCpu The cross context virtual CPU structure.
11614 * @param cbInstr The instruction length in bytes.
11615 *
11616 * @remarks Not all of the state needs to be synced in.
11617 */
11618VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11619{
11620 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11621
11622 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11623 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11624 Assert(!pVCpu->iem.s.cActiveMappings);
11625 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11626}
11627
11628
11629/**
11630 * Checks if IEM is in the process of delivering an event (interrupt or
11631 * exception).
11632 *
11633 * @returns true if we're in the process of raising an interrupt or exception,
11634 * false otherwise.
11635 * @param pVCpu The cross context virtual CPU structure.
11636 * @param puVector Where to store the vector associated with the
11637 * currently delivered event, optional.
11638 * @param   pfFlags         Where to store the event delivery flags (see
11639 * IEM_XCPT_FLAGS_XXX), optional.
11640 * @param puErr Where to store the error code associated with the
11641 * event, optional.
11642 * @param puCr2 Where to store the CR2 associated with the event,
11643 * optional.
11644 * @remarks The caller should check the flags to determine if the error code and
11645 * CR2 are valid for the event.
11646 */
11647VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11648{
11649 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11650 if (fRaisingXcpt)
11651 {
11652 if (puVector)
11653 *puVector = pVCpu->iem.s.uCurXcpt;
11654 if (pfFlags)
11655 *pfFlags = pVCpu->iem.s.fCurXcpt;
11656 if (puErr)
11657 *puErr = pVCpu->iem.s.uCurXcptErr;
11658 if (puCr2)
11659 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11660 }
11661 return fRaisingXcpt;
11662}
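
/*
 * Editorial usage sketch (not part of the original source): querying whether
 * IEM is currently delivering an event, e.g. so a nested fault can be
 * reported against the original vector.  All locals are specific to this
 * sketch.
 *
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *         Log(("Delivering vector %#x (flags %#x)\n", uVector, fFlags));
 */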
11663
11664#ifdef IN_RING3
11665
11666/**
11667 * Handles the unlikely and probably fatal merge cases.
11668 *
11669 * @returns Merged status code.
11670 * @param rcStrict Current EM status code.
11671 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11672 * with @a rcStrict.
11673 * @param iMemMap The memory mapping index. For error reporting only.
11674 * @param pVCpu The cross context virtual CPU structure of the calling
11675 * thread, for error reporting only.
11676 */
11677DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11678 unsigned iMemMap, PVMCPUCC pVCpu)
11679{
11680 if (RT_FAILURE_NP(rcStrict))
11681 return rcStrict;
11682
11683 if (RT_FAILURE_NP(rcStrictCommit))
11684 return rcStrictCommit;
11685
11686 if (rcStrict == rcStrictCommit)
11687 return rcStrictCommit;
11688
11689 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11690 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11691 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11692 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11694 return VERR_IOM_FF_STATUS_IPE;
11695}
11696
11697
11698/**
11699 * Helper for IOMR3ProcessForceFlag.
11700 *
11701 * @returns Merged status code.
11702 * @param rcStrict Current EM status code.
11703 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11704 * with @a rcStrict.
11705 * @param iMemMap The memory mapping index. For error reporting only.
11706 * @param pVCpu The cross context virtual CPU structure of the calling
11707 * thread, for error reporting only.
11708 */
11709DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11710{
11711 /* Simple. */
11712 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11713 return rcStrictCommit;
11714
11715 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11716 return rcStrict;
11717
11718 /* EM scheduling status codes. */
11719 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11720 && rcStrict <= VINF_EM_LAST))
11721 {
11722 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11723 && rcStrictCommit <= VINF_EM_LAST))
11724 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11725 }
11726
11727 /* Unlikely */
11728 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11729}
11730
11731
11732/**
11733 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11734 *
11735 * @returns Merge between @a rcStrict and what the commit operation returned.
11736 * @param pVM The cross context VM structure.
11737 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11738 * @param rcStrict The status code returned by ring-0 or raw-mode.
11739 */
11740VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11741{
11742 /*
11743 * Reset the pending commit.
11744 */
11745 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11746 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11747 ("%#x %#x %#x\n",
11748 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11749 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11750
11751 /*
11752 * Commit the pending bounce buffers (usually just one).
11753 */
11754 unsigned cBufs = 0;
11755 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11756 while (iMemMap-- > 0)
11757 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11758 {
11759 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11760 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11761 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11762
11763 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11764 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11765 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11766
11767 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11768 {
11769 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11770 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11771 pbBuf,
11772 cbFirst,
11773 PGMACCESSORIGIN_IEM);
11774 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11775 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11776 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11777 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11778 }
11779
11780 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11781 {
11782 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11783 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11784 pbBuf + cbFirst,
11785 cbSecond,
11786 PGMACCESSORIGIN_IEM);
11787 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11788 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11789 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11790 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11791 }
11792 cBufs++;
11793 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11794 }
11795
11796 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11797 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11798 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11799 pVCpu->iem.s.cActiveMappings = 0;
11800 return rcStrict;
11801}
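
/*
 * Editorial usage sketch (not part of the original source): the ring-3
 * force-flag dispatcher is expected to invoke this once it sees VMCPU_FF_IEM,
 * merging the commit status into whatever status it is already processing.
 * The check below is a simplified assumption of what EM does.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */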
11802
11803#endif /* IN_RING3 */
11804