VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100788

Last change on this file since 100788 was 100777, checked in by vboxsync, 21 months ago

IEM/VMM: More detailed (e.g. useful) memory access logging. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 475.7 KB
1/* $Id: IEMAll.cpp 100777 2023-08-01 23:03:51Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 *
91 * The syscall logging level assignments:
92 * - Level 1: DOS and BIOS.
93 * - Level 2: Windows 3.x
94 * - Level 3: Linux.
95 */
96
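/*
 * For instance (assuming the generic IPRT/VBox log-group syntax, so treat the
 * exact string as a sketch rather than gospel), the levels listed above are
 * selected with something along the lines of:
 *
 *      VBOX_LOG=+iem.e.l.l8.l9
 *      VBOX_LOG_DEST=file=/tmp/vbox-iem.log
 *
 * which would enable the level 1 events plus the memory write/read logging
 * (Log8/Log9) and send the output to a file.
 */
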
97/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
98#ifdef _MSC_VER
99# pragma warning(disable:4505)
100#endif
101
102
103/*********************************************************************************************************************************
104* Header Files *
105*********************************************************************************************************************************/
106#define LOG_GROUP LOG_GROUP_IEM
107#define VMCPU_INCL_CPUM_GST_CTX
108#include <VBox/vmm/iem.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/apic.h>
111#include <VBox/vmm/pdm.h>
112#include <VBox/vmm/pgm.h>
113#include <VBox/vmm/iom.h>
114#include <VBox/vmm/em.h>
115#include <VBox/vmm/hm.h>
116#include <VBox/vmm/nem.h>
117#include <VBox/vmm/gim.h>
118#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
119# include <VBox/vmm/em.h>
120# include <VBox/vmm/hm_svm.h>
121#endif
122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
123# include <VBox/vmm/hmvmxinline.h>
124#endif
125#include <VBox/vmm/tm.h>
126#include <VBox/vmm/dbgf.h>
127#include <VBox/vmm/dbgftrace.h>
128#include "IEMInternal.h"
129#include <VBox/vmm/vmcc.h>
130#include <VBox/log.h>
131#include <VBox/err.h>
132#include <VBox/param.h>
133#include <VBox/dis.h>
134#include <iprt/asm-math.h>
135#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
136# include <iprt/asm-amd64-x86.h>
137#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
138# include <iprt/asm-arm.h>
139#endif
140#include <iprt/assert.h>
141#include <iprt/string.h>
142#include <iprt/x86.h>
143
144#include "IEMInline.h"
145
146
147/*********************************************************************************************************************************
148* Structures and Typedefs *
149*********************************************************************************************************************************/
150/**
151 * CPU exception classes.
152 */
153typedef enum IEMXCPTCLASS
154{
155 IEMXCPTCLASS_BENIGN,
156 IEMXCPTCLASS_CONTRIBUTORY,
157 IEMXCPTCLASS_PAGE_FAULT,
158 IEMXCPTCLASS_DOUBLE_FAULT
159} IEMXCPTCLASS;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165#if defined(IEM_LOG_MEMORY_WRITES)
166/** What IEM just wrote. */
167uint8_t g_abIemWrote[256];
168/** How much IEM just wrote. */
169size_t g_cbIemWrote;
170#endif
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
177 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
178
179
180/**
181 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
182 * path.
183 *
184 * @returns IEM_F_PENDING_BRK_XXX or zero.
185 * @param pVCpu The cross context virtual CPU structure of the
186 * calling thread.
187 *
188 * @note Don't call directly, use iemCalcExecDbgFlags instead.
189 */
190uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
191{
192 uint32_t fExec = 0;
193
194 /*
195 * Process guest breakpoints.
196 */
197#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
198 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
199 { \
200 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
201 { \
202 case X86_DR7_RW_EO: \
203 fExec |= IEM_F_PENDING_BRK_INSTR; \
204 break; \
205 case X86_DR7_RW_WO: \
206 case X86_DR7_RW_RW: \
207 fExec |= IEM_F_PENDING_BRK_DATA; \
208 break; \
209 case X86_DR7_RW_IO: \
210 fExec |= IEM_F_PENDING_BRK_X86_IO; \
211 break; \
212 } \
213 } \
214 } while (0)
215
216 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
217 if (fGstDr7 & X86_DR7_ENABLED_MASK)
218 {
219 PROCESS_ONE_BP(fGstDr7, 0);
220 PROCESS_ONE_BP(fGstDr7, 1);
221 PROCESS_ONE_BP(fGstDr7, 2);
222 PROCESS_ONE_BP(fGstDr7, 3);
223 }
224
225 /*
226 * Process hypervisor breakpoints.
227 */
228 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
229 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
230 {
231 PROCESS_ONE_BP(fHyperDr7, 0);
232 PROCESS_ONE_BP(fHyperDr7, 1);
233 PROCESS_ONE_BP(fHyperDr7, 2);
234 PROCESS_ONE_BP(fHyperDr7, 3);
235 }
236
237 return fExec;
238}
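
/*
 * Worked example for the mapping above: a guest DR7 with L0 set and R/W0=01b
 * (write-only) arms breakpoint 0 as a data breakpoint, so PROCESS_ONE_BP adds
 * IEM_F_PENDING_BRK_DATA to the result; with R/W0=00b (execute only) it would
 * add IEM_F_PENDING_BRK_INSTR instead.
 */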
239
240
241/**
242 * Initializes the decoder state.
243 *
244 * iemReInitDecoder is mostly a copy of this function.
245 *
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @param fExecOpts Optional execution flags:
249 * - IEM_F_BYPASS_HANDLERS
250 * - IEM_F_X86_DISREGARD_LOCK
251 */
252DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
253{
254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
264
265 /* Execution state: */
266 uint32_t fExec;
267 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
268
269 /* Decoder state: */
270 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
271 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
272 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
273 {
274 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
276 }
277 else
278 {
279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
281 }
282 pVCpu->iem.s.fPrefixes = 0;
283 pVCpu->iem.s.uRexReg = 0;
284 pVCpu->iem.s.uRexB = 0;
285 pVCpu->iem.s.uRexIndex = 0;
286 pVCpu->iem.s.idxPrefix = 0;
287 pVCpu->iem.s.uVex3rdReg = 0;
288 pVCpu->iem.s.uVexLength = 0;
289 pVCpu->iem.s.fEvexStuff = 0;
290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
291#ifdef IEM_WITH_CODE_TLB
292 pVCpu->iem.s.pbInstrBuf = NULL;
293 pVCpu->iem.s.offInstrNextByte = 0;
294 pVCpu->iem.s.offCurInstrStart = 0;
295# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
296 pVCpu->iem.s.offOpcode = 0;
297# endif
298# ifdef VBOX_STRICT
299 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
300 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
301 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
302 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
303# endif
304#else
305 pVCpu->iem.s.offOpcode = 0;
306 pVCpu->iem.s.cbOpcode = 0;
307#endif
308 pVCpu->iem.s.offModRm = 0;
309 pVCpu->iem.s.cActiveMappings = 0;
310 pVCpu->iem.s.iNextMapping = 0;
311 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
312
313#ifdef DBGFTRACE_ENABLED
314 switch (IEM_GET_CPU_MODE(pVCpu))
315 {
316 case IEMMODE_64BIT:
317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
318 break;
319 case IEMMODE_32BIT:
320 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
321 break;
322 case IEMMODE_16BIT:
323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
324 break;
325 }
326#endif
327}
328
329
330/**
331 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
332 *
333 * This is mostly a copy of iemInitDecoder.
334 *
335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
336 */
337DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
338{
339 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
345 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
346 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
347 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
348
349 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
350 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
351 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
352
353 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
354 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
355 pVCpu->iem.s.enmEffAddrMode = enmMode;
356 if (enmMode != IEMMODE_64BIT)
357 {
358 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
359 pVCpu->iem.s.enmEffOpSize = enmMode;
360 }
361 else
362 {
363 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
364 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
365 }
366 pVCpu->iem.s.fPrefixes = 0;
367 pVCpu->iem.s.uRexReg = 0;
368 pVCpu->iem.s.uRexB = 0;
369 pVCpu->iem.s.uRexIndex = 0;
370 pVCpu->iem.s.idxPrefix = 0;
371 pVCpu->iem.s.uVex3rdReg = 0;
372 pVCpu->iem.s.uVexLength = 0;
373 pVCpu->iem.s.fEvexStuff = 0;
374 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
375#ifdef IEM_WITH_CODE_TLB
376 if (pVCpu->iem.s.pbInstrBuf)
377 {
378 uint64_t off = (enmMode == IEMMODE_64BIT
379 ? pVCpu->cpum.GstCtx.rip
380 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
381 - pVCpu->iem.s.uInstrBufPc;
382 if (off < pVCpu->iem.s.cbInstrBufTotal)
383 {
384 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
385 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
386 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
387 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
388 else
389 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
390 }
391 else
392 {
393 pVCpu->iem.s.pbInstrBuf = NULL;
394 pVCpu->iem.s.offInstrNextByte = 0;
395 pVCpu->iem.s.offCurInstrStart = 0;
396 pVCpu->iem.s.cbInstrBuf = 0;
397 pVCpu->iem.s.cbInstrBufTotal = 0;
398 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
399 }
400 }
401 else
402 {
403 pVCpu->iem.s.offInstrNextByte = 0;
404 pVCpu->iem.s.offCurInstrStart = 0;
405 pVCpu->iem.s.cbInstrBuf = 0;
406 pVCpu->iem.s.cbInstrBufTotal = 0;
407# ifdef VBOX_STRICT
408 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
409# endif
410 }
411# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
412 pVCpu->iem.s.offOpcode = 0;
413# endif
414#else /* !IEM_WITH_CODE_TLB */
415 pVCpu->iem.s.cbOpcode = 0;
416 pVCpu->iem.s.offOpcode = 0;
417#endif /* !IEM_WITH_CODE_TLB */
418 pVCpu->iem.s.offModRm = 0;
419 Assert(pVCpu->iem.s.cActiveMappings == 0);
420 pVCpu->iem.s.iNextMapping = 0;
421 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
422 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
423
424#ifdef DBGFTRACE_ENABLED
425 switch (enmMode)
426 {
427 case IEMMODE_64BIT:
428 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
429 break;
430 case IEMMODE_32BIT:
431 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
432 break;
433 case IEMMODE_16BIT:
434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
435 break;
436 }
437#endif
438}
439
440
441
442/**
443 * Prefetches opcodes the first time execution is started.
444 *
445 * @returns Strict VBox status code.
446 * @param pVCpu The cross context virtual CPU structure of the
447 * calling thread.
448 * @param fExecOpts Optional execution flags:
449 * - IEM_F_BYPASS_HANDLERS
450 * - IEM_F_X86_DISREGARD_LOCK
451 */
452static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
453{
454 iemInitDecoder(pVCpu, fExecOpts);
455
456#ifndef IEM_WITH_CODE_TLB
457 /*
458 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
459 *
460 * First translate CS:rIP to a physical address.
461 *
462 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
463 * all relevant bytes from the first page, as it ASSUMES it's only ever
464 * called for dealing with CS.LIM, page crossing and instructions that
465 * are too long.
466 */
467 uint32_t cbToTryRead;
468 RTGCPTR GCPtrPC;
469 if (IEM_IS_64BIT_CODE(pVCpu))
470 {
471 cbToTryRead = GUEST_PAGE_SIZE;
472 GCPtrPC = pVCpu->cpum.GstCtx.rip;
473 if (IEM_IS_CANONICAL(GCPtrPC))
474 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
475 else
476 return iemRaiseGeneralProtectionFault0(pVCpu);
477 }
478 else
479 {
480 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
481 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
482 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
483 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
484 else
485 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
486 if (cbToTryRead) { /* likely */ }
487 else /* overflowed */
488 {
489 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
490 cbToTryRead = UINT32_MAX;
491 }
492 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
493 Assert(GCPtrPC <= UINT32_MAX);
494 }
495
496 PGMPTWALK Walk;
497 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
498 if (RT_SUCCESS(rc))
499 Assert(Walk.fSucceeded); /* probable. */
500 else
501 {
502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
503# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
506# endif
507 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
508 }
509 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
510 else
511 {
512 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
513# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
516# endif
517 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
518 }
519 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
520 else
521 {
522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
523# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
524 if (Walk.fFailed & PGM_WALKFAIL_EPT)
525 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
526# endif
527 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
528 }
529 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
530 /** @todo Check reserved bits and such stuff. PGM is better at doing
531 * that, so do it when implementing the guest virtual address
532 * TLB... */
533
534 /*
535 * Read the bytes at this address.
536 */
537 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
538 if (cbToTryRead > cbLeftOnPage)
539 cbToTryRead = cbLeftOnPage;
540 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
541 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
542
543 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
547 { /* likely */ }
548 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
549 {
550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
551 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
552 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
553 }
554 else
555 {
556 Log((RT_SUCCESS(rcStrict)
557 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
558 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
560 return rcStrict;
561 }
562 }
563 else
564 {
565 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
566 if (RT_SUCCESS(rc))
567 { /* likely */ }
568 else
569 {
570 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
571 GCPtrPC, GCPhys, cbToTryRead, rc));
572 return rc;
573 }
574 }
575 pVCpu->iem.s.cbOpcode = cbToTryRead;
576#endif /* !IEM_WITH_CODE_TLB */
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Invalidates the IEM TLBs.
583 *
584 * This is called internally as well as by PGM when moving GC mappings.
585 *
586 * @param pVCpu The cross context virtual CPU structure of the calling
587 * thread.
588 */
589VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
590{
591#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
592 Log10(("IEMTlbInvalidateAll\n"));
593# ifdef IEM_WITH_CODE_TLB
594 pVCpu->iem.s.cbInstrBufTotal = 0;
595 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
596 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
597 { /* very likely */ }
598 else
599 {
600 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
601 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
602 while (i-- > 0)
603 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
604 }
605# endif
606
607# ifdef IEM_WITH_DATA_TLB
608 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
609 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
610 { /* very likely */ }
611 else
612 {
613 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
614 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
615 while (i-- > 0)
616 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
617 }
618# endif
619#else
620 RT_NOREF(pVCpu);
621#endif
622}
623
624
625/**
626 * Invalidates a page in the TLBs.
627 *
628 * @param pVCpu The cross context virtual CPU structure of the calling
629 * thread.
630 * @param GCPtr The address of the page to invalidate
631 * @thread EMT(pVCpu)
632 */
633VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
634{
635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
636 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
637 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
638 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
639 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
640
641# ifdef IEM_WITH_CODE_TLB
642 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
643 {
644 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
645 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
646 pVCpu->iem.s.cbInstrBufTotal = 0;
647 }
648# endif
649
650# ifdef IEM_WITH_DATA_TLB
651 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
652 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
653# endif
654#else
655 NOREF(pVCpu); NOREF(GCPtr);
656#endif
657}
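
/*
 * Illustrative sketch only - the helper below is hypothetical and compiled out;
 * it merely shows how a caller (say, INVLPG or MOV CR3 emulation) might pick
 * between the two invalidation entry points above.
 */
#if 0
static void iemTlbInvalidateExampleSketch(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, bool fFlushEverything)
{
    if (fFlushEverything)
        IEMTlbInvalidateAll(pVCpu);             /* bumps the revision, so every tag goes stale */
    else
        IEMTlbInvalidatePage(pVCpu, GCPtrPage); /* only zaps the entry for this linear address */
}
#endif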
658
659
660#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
661/**
662 * Invalidates both TLBs, the slow way, following a rollover.
663 *
664 * Worker for IEMTlbInvalidateAllPhysical,
665 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
666 * iemMemMapJmp and others.
667 *
668 * @thread EMT(pVCpu)
669 */
670static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
671{
672 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
673 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
674 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
675
676 unsigned i;
677# ifdef IEM_WITH_CODE_TLB
678 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
679 while (i-- > 0)
680 {
681 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
682 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
683 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
684 }
685# endif
686# ifdef IEM_WITH_DATA_TLB
687 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
688 while (i-- > 0)
689 {
690 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
691 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
692 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
693 }
694# endif
695
696}
697#endif
698
699
700/**
701 * Invalidates the host physical aspects of the IEM TLBs.
702 *
703 * This is called internally as well as by PGM when moving GC mappings.
704 *
705 * @param pVCpu The cross context virtual CPU structure of the calling
706 * thread.
707 * @note Currently not used.
708 */
709VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
710{
711#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
712 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
713 Log10(("IEMTlbInvalidateAllPhysical\n"));
714
715# ifdef IEM_WITH_CODE_TLB
716 pVCpu->iem.s.cbInstrBufTotal = 0;
717# endif
718 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
719 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
720 {
721 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
722 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
723 }
724 else
725 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
726#else
727 NOREF(pVCpu);
728#endif
729}
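
/*
 * Note on the scheme above: bumping uTlbPhysRev is what invalidates the
 * physical side; entries whose IEMTLBE_F_PHYS_REV bits no longer match the
 * current revision are treated as having no physical page info and get
 * re-resolved on the next use.  Only when the counter is about to wrap does
 * IEMTlbInvalidateAllPhysicalSlow walk the entries and scrub the flags.
 */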
730
731
732/**
733 * Invalidates the host physical aspects of the IEM TLBs.
734 *
735 * This is called internally as well as by PGM when moving GC mappings.
736 *
737 * @param pVM The cross context VM structure.
738 * @param idCpuCaller The ID of the calling EMT if available to the caller,
739 * otherwise NIL_VMCPUID.
740 *
741 * @remarks Caller holds the PGM lock.
742 */
743VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
744{
745#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
746 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
747 if (pVCpuCaller)
748 VMCPU_ASSERT_EMT(pVCpuCaller);
749 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
750
751 VMCC_FOR_EACH_VMCPU(pVM)
752 {
753# ifdef IEM_WITH_CODE_TLB
754 if (pVCpuCaller == pVCpu)
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756# endif
757
758 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
759 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
760 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
761 { /* likely */}
762 else if (pVCpuCaller == pVCpu)
763 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
764 else
765 {
766 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
767 continue;
768 }
769 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
770 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
771 }
772 VMCC_FOR_EACH_VMCPU_END(pVM);
773
774#else
775 RT_NOREF(pVM, idCpuCaller);
776#endif
777}
778
779
780/**
781 * Flushes the prefetch buffer, light version.
782 */
783void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
784{
785#ifndef IEM_WITH_CODE_TLB
786 pVCpu->iem.s.cbOpcode = cbInstr;
787#else
788 RT_NOREF(pVCpu, cbInstr);
789#endif
790}
791
792
793/**
794 * Flushes the prefetch buffer, heavy version.
795 */
796void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
797{
798#ifndef IEM_WITH_CODE_TLB
799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
800#elif 1
801 pVCpu->iem.s.pbInstrBuf = NULL;
802 pVCpu->iem.s.cbInstrBufTotal = 0;
803 RT_NOREF(cbInstr);
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810
811#ifdef IEM_WITH_CODE_TLB
812
813/**
814 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
815 * failure and jumping.
816 *
817 * We end up here for a number of reasons:
818 * - pbInstrBuf isn't yet initialized.
819 * - Advancing beyond the buffer boundary (e.g. crossing a page).
820 * - Advancing beyond the CS segment limit.
821 * - Fetching from non-mappable page (e.g. MMIO).
822 *
823 * @param pVCpu The cross context virtual CPU structure of the
824 * calling thread.
825 * @param pvDst Where to return the bytes.
826 * @param cbDst Number of bytes to read. A value of zero is
827 * allowed for initializing pbInstrBuf (the
828 * recompiler does this). In this case it is best
829 * to set pbInstrBuf to NULL prior to the call.
830 */
831void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
832{
833# ifdef IN_RING3
834 for (;;)
835 {
836 Assert(cbDst <= 8);
837 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
838
839 /*
840 * We might have a partial buffer match, deal with that first to make the
841 * rest simpler. This is the first part of the cross page/buffer case.
842 */
843 if (pVCpu->iem.s.pbInstrBuf != NULL)
844 {
845 if (offBuf < pVCpu->iem.s.cbInstrBuf)
846 {
847 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
848 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
849 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
850
851 cbDst -= cbCopy;
852 pvDst = (uint8_t *)pvDst + cbCopy;
853 offBuf += cbCopy;
854 pVCpu->iem.s.offInstrNextByte += offBuf;
855 }
856 }
857
858 /*
859 * Check segment limit, figuring how much we're allowed to access at this point.
860 *
861 * We will fault immediately if RIP is past the segment limit / in non-canonical
862 * territory. If we do continue, there are one or more bytes to read before we
863 * end up in trouble and we need to do that first before faulting.
864 */
865 RTGCPTR GCPtrFirst;
866 uint32_t cbMaxRead;
867 if (IEM_IS_64BIT_CODE(pVCpu))
868 {
869 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
870 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
871 { /* likely */ }
872 else
873 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
874 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
875 }
876 else
877 {
878 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
879 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
880 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
881 { /* likely */ }
882 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
883 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
884 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
885 if (cbMaxRead != 0)
886 { /* likely */ }
887 else
888 {
889 /* Overflowed because address is 0 and limit is max. */
890 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
891 cbMaxRead = X86_PAGE_SIZE;
892 }
893 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
894 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
895 if (cbMaxRead2 < cbMaxRead)
896 cbMaxRead = cbMaxRead2;
897 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
898 }
899
900 /*
901 * Get the TLB entry for this piece of code.
902 */
903 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
904 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
905 if (pTlbe->uTag == uTag)
906 {
907 /* likely when executing lots of code, otherwise unlikely */
908# ifdef VBOX_WITH_STATISTICS
909 pVCpu->iem.s.CodeTlb.cTlbHits++;
910# endif
911 }
912 else
913 {
914 pVCpu->iem.s.CodeTlb.cTlbMisses++;
915 PGMPTWALK Walk;
916 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
917 if (RT_FAILURE(rc))
918 {
919#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
920 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
921 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
922#endif
923 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
924 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
925 }
926
927 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
928 Assert(Walk.fSucceeded);
929 pTlbe->uTag = uTag;
930 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
931 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
932 pTlbe->GCPhys = Walk.GCPhys;
933 pTlbe->pbMappingR3 = NULL;
934 }
935
936 /*
937 * Check TLB page table level access flags.
938 */
939 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
940 {
941 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
942 {
943 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
944 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
945 }
946 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
947 {
948 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
949 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
950 }
951 }
952
953 /*
954 * Look up the physical page info if necessary.
955 */
956 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
957 { /* not necessary */ }
958 else
959 {
960 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
961 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
962 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
963 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
964 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
965 { /* likely */ }
966 else
967 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
968 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
969 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
970 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
971 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
972 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
973 }
974
975# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
976 /*
977 * Try do a direct read using the pbMappingR3 pointer.
978 */
979 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
980 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
981 {
982 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
983 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
984 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
985 {
986 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
987 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
988 }
989 else
990 {
991 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
992 if (cbInstr + (uint32_t)cbDst <= 15)
993 {
994 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
995 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
996 }
997 else
998 {
999 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1001 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1002 }
1003 }
1004 if (cbDst <= cbMaxRead)
1005 {
1006 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1007 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1008
1009 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1010 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1011 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1012 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1013 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1014 return;
1015 }
1016 pVCpu->iem.s.pbInstrBuf = NULL;
1017
1018 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1019 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1020 }
1021# else
1022# error "refactor as needed"
1023 /*
1024 * There is no special read handling, so we can read a bit more and
1025 * put it in the prefetch buffer.
1026 */
1027 if ( cbDst < cbMaxRead
1028 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1029 {
1030 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1031 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1032 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1033 { /* likely */ }
1034 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1035 {
1036 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1039 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1040 }
1041 else
1042 {
1043 Log((RT_SUCCESS(rcStrict)
1044 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1045 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1046 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1047 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1048 }
1049 }
1050# endif
1051 /*
1052 * Special read handling, so only read exactly what's needed.
1053 * This is a highly unlikely scenario.
1054 */
1055 else
1056 {
1057 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1058
1059 /* Check instruction length. */
1060 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1061 if (RT_LIKELY(cbInstr + cbDst <= 15))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1067 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1068 }
1069
1070 /* Do the reading. */
1071 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1072 if (cbToRead > 0)
1073 {
1074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1075 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1076 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1077 { /* likely */ }
1078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1083 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1092 }
1093 }
1094
1095 /* Update the state and probably return. */
1096 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1097 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1098 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1099
1100 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1101 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1102 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1103 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1104 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1105 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1106 pVCpu->iem.s.pbInstrBuf = NULL;
1107 if (cbToRead == cbDst)
1108 return;
1109 }
1110
1111 /*
1112 * More to read, loop.
1113 */
1114 cbDst -= cbMaxRead;
1115 pvDst = (uint8_t *)pvDst + cbMaxRead;
1116 }
1117# else /* !IN_RING3 */
1118 RT_NOREF(pvDst, cbDst);
1119 if (pvDst || cbDst)
1120 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1121# endif /* !IN_RING3 */
1122}
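
/*
 * Note: the '15' constants and the #GP(0) raised in the length checks above
 * reflect the architectural maximum x86 instruction length of 15 bytes;
 * running past it while fetching opcode bytes is reported as a general
 * protection fault rather than allowed to keep growing.
 */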
1123
1124#else /* !IEM_WITH_CODE_TLB */
1125
1126/**
1127 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1128 * exception if it fails.
1129 *
1130 * @returns Strict VBox status code.
1131 * @param pVCpu The cross context virtual CPU structure of the
1132 * calling thread.
1133 * @param cbMin The minimum number of bytes relative to offOpcode
1134 * that must be read.
1135 */
1136VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1137{
1138 /*
1139 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1140 *
1141 * First translate CS:rIP to a physical address.
1142 */
1143 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1144 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1145 uint8_t const cbLeft = cbOpcode - offOpcode;
1146 Assert(cbLeft < cbMin);
1147 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1148
1149 uint32_t cbToTryRead;
1150 RTGCPTR GCPtrNext;
1151 if (IEM_IS_64BIT_CODE(pVCpu))
1152 {
1153 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1154 if (!IEM_IS_CANONICAL(GCPtrNext))
1155 return iemRaiseGeneralProtectionFault0(pVCpu);
1156 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1157 }
1158 else
1159 {
1160 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1161 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1162 GCPtrNext32 += cbOpcode;
1163 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1164 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1165 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1166 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1167 if (!cbToTryRead) /* overflowed */
1168 {
1169 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1170 cbToTryRead = UINT32_MAX;
1171 /** @todo check out wrapping around the code segment. */
1172 }
1173 if (cbToTryRead < cbMin - cbLeft)
1174 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1175 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1176
1177 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1178 if (cbToTryRead > cbLeftOnPage)
1179 cbToTryRead = cbLeftOnPage;
1180 }
1181
1182 /* Restrict to opcode buffer space.
1183
1184 We're making ASSUMPTIONS here based on work done previously in
1185 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1186 be fetched in case of an instruction crossing two pages. */
1187 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1188 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1189 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1190 { /* likely */ }
1191 else
1192 {
1193 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1194 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1195 return iemRaiseGeneralProtectionFault0(pVCpu);
1196 }
1197
1198 PGMPTWALK Walk;
1199 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1200 if (RT_FAILURE(rc))
1201 {
1202 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1204 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1205 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1206#endif
1207 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1210 {
1211 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1213 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1214 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1215#endif
1216 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1217 }
1218 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1219 {
1220 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1221#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1222 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1223 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1224#endif
1225 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1226 }
1227 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1228 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1229 /** @todo Check reserved bits and such stuff. PGM is better at doing
1230 * that, so do it when implementing the guest virtual address
1231 * TLB... */
1232
1233 /*
1234 * Read the bytes at this address.
1235 *
1236 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1237 * and since PATM should only patch the start of an instruction there
1238 * should be no need to check again here.
1239 */
1240 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1241 {
1242 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1243 cbToTryRead, PGMACCESSORIGIN_IEM);
1244 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1245 { /* likely */ }
1246 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1247 {
1248 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1249 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1250 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1251 }
1252 else
1253 {
1254 Log((RT_SUCCESS(rcStrict)
1255 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1256 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1257 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1258 return rcStrict;
1259 }
1260 }
1261 else
1262 {
1263 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1264 if (RT_SUCCESS(rc))
1265 { /* likely */ }
1266 else
1267 {
1268 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1269 return rc;
1270 }
1271 }
1272 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1273 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1274
1275 return VINF_SUCCESS;
1276}
1277
1278#endif /* !IEM_WITH_CODE_TLB */
1279#ifndef IEM_WITH_SETJMP
1280
1281/**
1282 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1283 *
1284 * @returns Strict VBox status code.
1285 * @param pVCpu The cross context virtual CPU structure of the
1286 * calling thread.
1287 * @param pb Where to return the opcode byte.
1288 */
1289VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1290{
1291 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1292 if (rcStrict == VINF_SUCCESS)
1293 {
1294 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1295 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1296 pVCpu->iem.s.offOpcode = offOpcode + 1;
1297 }
1298 else
1299 *pb = 0;
1300 return rcStrict;
1301}
1302
1303#else /* IEM_WITH_SETJMP */
1304
1305/**
1306 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1307 *
1308 * @returns The opcode byte.
1309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1310 */
1311uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1312{
1313# ifdef IEM_WITH_CODE_TLB
1314 uint8_t u8;
1315 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1316 return u8;
1317# else
1318 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1319 if (rcStrict == VINF_SUCCESS)
1320 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1321 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1322# endif
1323}
1324
1325#endif /* IEM_WITH_SETJMP */
1326
1327#ifndef IEM_WITH_SETJMP
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1334 * @param pu16 Where to return the sign-extended opcode byte (as a word).
1335 */
1336VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1337{
1338 uint8_t u8;
1339 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1340 if (rcStrict == VINF_SUCCESS)
1341 *pu16 = (int8_t)u8;
1342 return rcStrict;
1343}
1344
1345
1346/**
1347 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 * @param pu32 Where to return the opcode dword.
1352 */
1353VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1354{
1355 uint8_t u8;
1356 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1357 if (rcStrict == VINF_SUCCESS)
1358 *pu32 = (int8_t)u8;
1359 return rcStrict;
1360}
1361
1362
1363/**
1364 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1368 * @param pu64 Where to return the opcode qword.
1369 */
1370VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1371{
1372 uint8_t u8;
1373 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1374 if (rcStrict == VINF_SUCCESS)
1375 *pu64 = (int8_t)u8;
1376 return rcStrict;
1377}
1378
1379#endif /* !IEM_WITH_SETJMP */
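
/*
 * For instance, the S8Sx helpers above sign-extend a single opcode byte, so a
 * displacement byte of 0x80 becomes 0xFF80 / 0xFFFFFF80 / 0xFFFFFFFFFFFFFF80
 * for the 16-, 32- and 64-bit variants, while 0x7F stays 0x007F and so on.
 */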
1380
1381
1382#ifndef IEM_WITH_SETJMP
1383
1384/**
1385 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1386 *
1387 * @returns Strict VBox status code.
1388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1389 * @param pu16 Where to return the opcode word.
1390 */
1391VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1392{
1393 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1394 if (rcStrict == VINF_SUCCESS)
1395 {
1396 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1399# else
1400 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1401# endif
1402 pVCpu->iem.s.offOpcode = offOpcode + 2;
1403 }
1404 else
1405 *pu16 = 0;
1406 return rcStrict;
1407}
1408
1409#else /* IEM_WITH_SETJMP */
1410
1411/**
1412 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1413 *
1414 * @returns The opcode word.
1415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1416 */
1417uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1418{
1419# ifdef IEM_WITH_CODE_TLB
1420 uint16_t u16;
1421 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1422 return u16;
1423# else
1424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1425 if (rcStrict == VINF_SUCCESS)
1426 {
1427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1428 pVCpu->iem.s.offOpcode += 2;
1429# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1430 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1431# else
1432 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1433# endif
1434 }
1435 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1436# endif
1437}
1438
1439#endif /* IEM_WITH_SETJMP */
1440
1441#ifndef IEM_WITH_SETJMP
1442
1443/**
1444 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1445 *
1446 * @returns Strict VBox status code.
1447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1448 * @param pu32 Where to return the opcode double word.
1449 */
1450VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1451{
1452 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1453 if (rcStrict == VINF_SUCCESS)
1454 {
1455 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1456 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1457 pVCpu->iem.s.offOpcode = offOpcode + 2;
1458 }
1459 else
1460 *pu32 = 0;
1461 return rcStrict;
1462}
1463
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu64 Where to return the opcode quad word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu64 = 0;
1483 return rcStrict;
1484}
1485
1486#endif /* !IEM_WITH_SETJMP */
1487
1488#ifndef IEM_WITH_SETJMP
1489
1490/**
1491 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1492 *
1493 * @returns Strict VBox status code.
1494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1495 * @param pu32 Where to return the opcode dword.
1496 */
1497VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1498{
1499 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1500 if (rcStrict == VINF_SUCCESS)
1501 {
1502 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1503# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1504 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1505# else
1506 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1507 pVCpu->iem.s.abOpcode[offOpcode + 1],
1508 pVCpu->iem.s.abOpcode[offOpcode + 2],
1509 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1510# endif
1511 pVCpu->iem.s.offOpcode = offOpcode + 4;
1512 }
1513 else
1514 *pu32 = 0;
1515 return rcStrict;
1516}
1517
1518#else /* IEM_WITH_SETJMP */
1519
1520/**
1521 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1522 *
1523 * @returns The opcode dword.
1524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1525 */
1526uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1527{
1528# ifdef IEM_WITH_CODE_TLB
1529 uint32_t u32;
1530 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1531 return u32;
1532# else
1533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1534 if (rcStrict == VINF_SUCCESS)
1535 {
1536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1537 pVCpu->iem.s.offOpcode = offOpcode + 4;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 }
1547 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1548# endif
1549}
1550
1551#endif /* IEM_WITH_SETJMP */
1552
1553#ifndef IEM_WITH_SETJMP
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1557 *
1558 * @returns Strict VBox status code.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param pu64 Where to return the zero-extended opcode dword.
1561 */
1562VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1563{
1564 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1565 if (rcStrict == VINF_SUCCESS)
1566 {
1567 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1568 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1569 pVCpu->iem.s.abOpcode[offOpcode + 1],
1570 pVCpu->iem.s.abOpcode[offOpcode + 2],
1571 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573 }
1574 else
1575 *pu64 = 0;
1576 return rcStrict;
1577}
1578
1579
1580/**
1581 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1582 *
1583 * @returns Strict VBox status code.
1584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1585 * @param pu64 Where to return the opcode qword.
1586 */
1587VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1588{
1589 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1590 if (rcStrict == VINF_SUCCESS)
1591 {
1592 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1593 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1594 pVCpu->iem.s.abOpcode[offOpcode + 1],
1595 pVCpu->iem.s.abOpcode[offOpcode + 2],
1596 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1597 pVCpu->iem.s.offOpcode = offOpcode + 4;
1598 }
1599 else
1600 *pu64 = 0;
1601 return rcStrict;
1602}
1603
1604#endif /* !IEM_WITH_SETJMP */
1605
1606#ifndef IEM_WITH_SETJMP
1607
1608/**
1609 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1610 *
1611 * @returns Strict VBox status code.
1612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1613 * @param pu64 Where to return the opcode qword.
1614 */
1615VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1616{
1617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1618 if (rcStrict == VINF_SUCCESS)
1619 {
1620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1621# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1622 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1623# else
1624 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1625 pVCpu->iem.s.abOpcode[offOpcode + 1],
1626 pVCpu->iem.s.abOpcode[offOpcode + 2],
1627 pVCpu->iem.s.abOpcode[offOpcode + 3],
1628 pVCpu->iem.s.abOpcode[offOpcode + 4],
1629 pVCpu->iem.s.abOpcode[offOpcode + 5],
1630 pVCpu->iem.s.abOpcode[offOpcode + 6],
1631 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1632# endif
1633 pVCpu->iem.s.offOpcode = offOpcode + 8;
1634 }
1635 else
1636 *pu64 = 0;
1637 return rcStrict;
1638}
1639
1640#else /* IEM_WITH_SETJMP */
1641
1642/**
1643 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1644 *
1645 * @returns The opcode qword.
1646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1647 */
1648uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1649{
1650# ifdef IEM_WITH_CODE_TLB
1651 uint64_t u64;
1652 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1653 return u64;
1654# else
1655 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1656 if (rcStrict == VINF_SUCCESS)
1657 {
1658 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1659 pVCpu->iem.s.offOpcode = offOpcode + 8;
1660# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1661 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1662# else
1663 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1664 pVCpu->iem.s.abOpcode[offOpcode + 1],
1665 pVCpu->iem.s.abOpcode[offOpcode + 2],
1666 pVCpu->iem.s.abOpcode[offOpcode + 3],
1667 pVCpu->iem.s.abOpcode[offOpcode + 4],
1668 pVCpu->iem.s.abOpcode[offOpcode + 5],
1669 pVCpu->iem.s.abOpcode[offOpcode + 6],
1670 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1671# endif
1672 }
1673 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1674# endif
1675}
1676
1677#endif /* IEM_WITH_SETJMP */
1678
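/*
 * Illustrative sketch (not built): the slow-path fetchers above assemble
 * multi-byte opcode values in little-endian order, so on a little-endian host
 * the RT_MAKE_Uxx_FROM_U8 composition yields the same value as the
 * IEM_USE_UNALIGNED_DATA_ACCESS read.  The helper name is made up for the
 * example.
 */
#if 0
static void iemOpcodeByteOrderExample(void)
{
    uint8_t const  abOpcode[4] = { 0x78, 0x56, 0x34, 0x12 };
    uint32_t const u32         = RT_MAKE_U32_FROM_U8(abOpcode[0], abOpcode[1], abOpcode[2], abOpcode[3]);
    Assert(u32 == UINT32_C(0x12345678)); /* byte sequence 78 56 34 12 -> dword 0x12345678 */
}
#endif
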
1679
1680
1681/** @name Misc Worker Functions.
1682 * @{
1683 */
1684
1685/**
1686 * Gets the exception class for the specified exception vector.
1687 *
1688 * @returns The class of the specified exception.
1689 * @param uVector The exception vector.
1690 */
1691static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1692{
1693 Assert(uVector <= X86_XCPT_LAST);
1694 switch (uVector)
1695 {
1696 case X86_XCPT_DE:
1697 case X86_XCPT_TS:
1698 case X86_XCPT_NP:
1699 case X86_XCPT_SS:
1700 case X86_XCPT_GP:
1701 case X86_XCPT_SX: /* AMD only */
1702 return IEMXCPTCLASS_CONTRIBUTORY;
1703
1704 case X86_XCPT_PF:
1705 case X86_XCPT_VE: /* Intel only */
1706 return IEMXCPTCLASS_PAGE_FAULT;
1707
1708 case X86_XCPT_DF:
1709 return IEMXCPTCLASS_DOUBLE_FAULT;
1710 }
1711 return IEMXCPTCLASS_BENIGN;
1712}
1713
1714
1715/**
1716 * Evaluates how to handle an exception caused during delivery of another event
1717 * (exception / interrupt).
1718 *
1719 * @returns How to handle the recursive exception.
1720 * @param pVCpu The cross context virtual CPU structure of the
1721 * calling thread.
1722 * @param fPrevFlags The flags of the previous event.
1723 * @param uPrevVector The vector of the previous event.
1724 * @param fCurFlags The flags of the current exception.
1725 * @param uCurVector The vector of the current exception.
1726 * @param pfXcptRaiseInfo Where to store additional information about the
1727 * exception condition. Optional.
1728 */
1729VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1730 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1731{
1732 /*
1733 * Only CPU exceptions can be raised while delivering other events; software interrupt
1734 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1735 */
1736 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1737 Assert(pVCpu); RT_NOREF(pVCpu);
1738 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1739
1740 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1741 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1742 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1743 {
1744 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1745 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1746 {
1747 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1748 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1749 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1750 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1751 {
1752 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1753 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1754 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1755 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1756 uCurVector, pVCpu->cpum.GstCtx.cr2));
1757 }
1758 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1759 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1760 {
1761 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1762 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1763 }
1764 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1765 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1766 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1767 {
1768 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1769 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1770 }
1771 }
1772 else
1773 {
1774 if (uPrevVector == X86_XCPT_NMI)
1775 {
1776 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1777 if (uCurVector == X86_XCPT_PF)
1778 {
1779 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1780 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1781 }
1782 }
1783 else if ( uPrevVector == X86_XCPT_AC
1784 && uCurVector == X86_XCPT_AC)
1785 {
1786 enmRaise = IEMXCPTRAISE_CPU_HANG;
1787 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1788 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1789 }
1790 }
1791 }
1792 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1793 {
1794 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1795 if (uCurVector == X86_XCPT_PF)
1796 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1797 }
1798 else
1799 {
1800 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1801 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1802 }
1803
1804 if (pfXcptRaiseInfo)
1805 *pfXcptRaiseInfo = fRaiseInfo;
1806 return enmRaise;
1807}
1808
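/*
 * Illustrative sketch (not built, helper name made up): a #GP raised while
 * delivering a #PF is a page-fault + contributory combination per the
 * classification above and therefore folds into a #DF.
 */
#if 0
static void iemRecursiveXcptExample(PVMCPUCC pVCpu)
{
    IEMXCPTRAISEINFO   fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
                                                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
                                                             X86_XCPT_PF,
                                                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                                             X86_XCPT_GP,
                                                             &fRaiseInfo);
    Assert(enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
}
#endif
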
1809
1810/**
1811 * Enters the CPU shutdown state initiated by a triple fault or other
1812 * unrecoverable conditions.
1813 *
1814 * @returns Strict VBox status code.
1815 * @param pVCpu The cross context virtual CPU structure of the
1816 * calling thread.
1817 */
1818static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1819{
1820 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1821 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1822
1823 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1824 {
1825 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1826 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1827 }
1828
1829 RT_NOREF(pVCpu);
1830 return VINF_EM_TRIPLE_FAULT;
1831}
1832
1833
1834/**
1835 * Validates a new SS segment.
1836 *
1837 * @returns VBox strict status code.
1838 * @param pVCpu The cross context virtual CPU structure of the
1839 * calling thread.
1840 * @param NewSS The new SS selector.
1841 * @param uCpl The CPL to load the stack for.
1842 * @param pDesc Where to return the descriptor.
1843 */
1844static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1845{
1846 /* Null selectors are not allowed (we're not called for dispatching
1847 interrupts with SS=0 in long mode). */
1848 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1849 {
1850 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1851 return iemRaiseTaskSwitchFault0(pVCpu);
1852 }
1853
1854 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1855 if ((NewSS & X86_SEL_RPL) != uCpl)
1856 {
1857 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1858 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1859 }
1860
1861 /*
1862 * Read the descriptor.
1863 */
1864 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1865 if (rcStrict != VINF_SUCCESS)
1866 return rcStrict;
1867
1868 /*
1869 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1870 */
1871 if (!pDesc->Legacy.Gen.u1DescType)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1878 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1879 {
1880 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1881 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1882 }
1883 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1886 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1887 }
1888
1889 /* Is it there? */
1890 /** @todo testcase: Is this checked before the canonical / limit check below? */
1891 if (!pDesc->Legacy.Gen.u1Present)
1892 {
1893 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1895 }
1896
1897 return VINF_SUCCESS;
1898}
1899
1900/** @} */
1901
1902
1903/** @name Raising Exceptions.
1904 *
1905 * @{
1906 */
1907
1908
1909/**
1910 * Loads the specified stack far pointer from the TSS.
1911 *
1912 * @returns VBox strict status code.
1913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1914 * @param uCpl The CPL to load the stack for.
1915 * @param pSelSS Where to return the new stack segment.
1916 * @param puEsp Where to return the new stack pointer.
1917 */
1918static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1919{
1920 VBOXSTRICTRC rcStrict;
1921 Assert(uCpl < 4);
1922
1923 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1924 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1925 {
1926 /*
1927 * 16-bit TSS (X86TSS16).
1928 */
1929 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1930 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1931 {
1932 uint32_t off = uCpl * 4 + 2;
1933 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1934 {
1935 /** @todo check actual access pattern here. */
1936 uint32_t u32Tmp = 0; /* gcc maybe... */
1937 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1938 if (rcStrict == VINF_SUCCESS)
1939 {
1940 *puEsp = RT_LOWORD(u32Tmp);
1941 *pSelSS = RT_HIWORD(u32Tmp);
1942 return VINF_SUCCESS;
1943 }
1944 }
1945 else
1946 {
1947 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1948 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1949 }
1950 break;
1951 }
1952
1953 /*
1954 * 32-bit TSS (X86TSS32).
1955 */
1956 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1957 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1958 {
1959 uint32_t off = uCpl * 8 + 4;
1960 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1961 {
1962                /** @todo check actual access pattern here. */
1963 uint64_t u64Tmp;
1964 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1965 if (rcStrict == VINF_SUCCESS)
1966 {
1967 *puEsp = u64Tmp & UINT32_MAX;
1968 *pSelSS = (RTSEL)(u64Tmp >> 32);
1969 return VINF_SUCCESS;
1970 }
1971 }
1972 else
1973 {
1974                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1975 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1976 }
1977 break;
1978 }
1979
1980 default:
1981 AssertFailed();
1982 rcStrict = VERR_IEM_IPE_4;
1983 break;
1984 }
1985
1986 *puEsp = 0; /* make gcc happy */
1987 *pSelSS = 0; /* make gcc happy */
1988 return rcStrict;
1989}
1990
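/*
 * Layout sketch (not built, assuming the X86TSS16/X86TSS32 member names from
 * x86.h): the ss:sp pair for ring N starts at offset N*4 + 2 in a 16-bit TSS
 * and at N*8 + 4 in a 32-bit TSS, which is exactly the offset math used above,
 * e.g. for CPL 1:
 */
#if 0
AssertCompile(RT_UOFFSETOF(X86TSS16, sp1)  == 1 * 4 + 2); /* sp1 at 0x06, ss1 right after */
AssertCompile(RT_UOFFSETOF(X86TSS32, esp1) == 1 * 8 + 4); /* esp1 at 0x0c, ss1 right after */
#endif
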
1991
1992/**
1993 * Loads the specified stack pointer from the 64-bit TSS.
1994 *
1995 * @returns VBox strict status code.
1996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1997 * @param uCpl The CPL to load the stack for.
1998 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1999 * @param puRsp Where to return the new stack pointer.
2000 */
2001static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2002{
2003 Assert(uCpl < 4);
2004 Assert(uIst < 8);
2005 *puRsp = 0; /* make gcc happy */
2006
2007 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2008 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2009
2010 uint32_t off;
2011 if (uIst)
2012 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2013 else
2014 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2015 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2016 {
2017 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2018 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2019 }
2020
2021 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2022}
2023
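/*
 * Worked offsets for the formulas above (architectural 64-bit TSS layout with
 * rsp0 at byte 4 and ist1 at byte 36): uCpl=2/uIst=0 gives off = 2*8 + 4 = 20
 * (0x14, i.e. rsp2), while uIst=1 gives off = 0*8 + 36 = 36 (0x24, i.e. ist1).
 */
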
2024
2025/**
2026 * Adjust the CPU state according to the exception being raised.
2027 *
2028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2029 * @param u8Vector The exception that has been raised.
2030 */
2031DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2032{
2033 switch (u8Vector)
2034 {
2035 case X86_XCPT_DB:
2036 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2037 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2038 break;
2039 /** @todo Read the AMD and Intel exception reference... */
2040 }
2041}
2042
2043
2044/**
2045 * Implements exceptions and interrupts for real mode.
2046 *
2047 * @returns VBox strict status code.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 * @param cbInstr The number of bytes to offset rIP by in the return
2050 * address.
2051 * @param u8Vector The interrupt / exception vector number.
2052 * @param fFlags The flags.
2053 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2054 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2055 */
2056static VBOXSTRICTRC
2057iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2058 uint8_t cbInstr,
2059 uint8_t u8Vector,
2060 uint32_t fFlags,
2061 uint16_t uErr,
2062 uint64_t uCr2) RT_NOEXCEPT
2063{
2064 NOREF(uErr); NOREF(uCr2);
2065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2066
2067 /*
2068 * Read the IDT entry.
2069 */
2070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2071 {
2072 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2074 }
2075 RTFAR16 Idte;
2076 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2077 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2078 {
2079 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2080 return rcStrict;
2081 }
2082
2083 /*
2084 * Push the stack frame.
2085 */
2086 uint16_t *pu16Frame;
2087 uint64_t uNewRsp;
2088 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2089 if (rcStrict != VINF_SUCCESS)
2090 return rcStrict;
2091
2092 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2093#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2094 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2095 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2096 fEfl |= UINT16_C(0xf000);
2097#endif
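    /* Note: on 8086/80186-class CPUs bits 12-15 of FLAGS read as ones, which is
       what the 0xf000 OR above reproduces in the pushed flags image. */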
2098 pu16Frame[2] = (uint16_t)fEfl;
2099 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2100 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2101 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2102 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2103 return rcStrict;
2104
2105 /*
2106 * Load the vector address into cs:ip and make exception specific state
2107 * adjustments.
2108 */
2109 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2110 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2111 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2112 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2113 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2114 pVCpu->cpum.GstCtx.rip = Idte.off;
2115 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2116 IEMMISC_SET_EFL(pVCpu, fEfl);
2117
2118 /** @todo do we actually do this in real mode? */
2119 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2120 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2121
2122 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2123 so best leave them alone in case we're in a weird kind of real mode... */
2124
2125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2126}
2127
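/*
 * Illustrative sketch (not built, helper name and values made up): in real
 * mode the IVT entry for vector N is a 4-byte far pointer at IDTR.base + N*4,
 * and the handler is reached at linear address sel*16 + off, which is how
 * CS.u64Base and RIP are derived above.
 */
#if 0
static void iemRealModeVectorExample(void)
{
    RTFAR16 const  Idte         = { UINT16_C(0x1234) /* off */, UINT16_C(0xf000) /* sel */ };
    uint32_t const GCPtrHandler = ((uint32_t)Idte.sel << 4) + Idte.off;
    Assert(GCPtrHandler == UINT32_C(0x000f1234));
}
#endif
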
2128
2129/**
2130 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2131 *
2132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2133 * @param pSReg Pointer to the segment register.
2134 */
2135DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2136{
2137 pSReg->Sel = 0;
2138 pSReg->ValidSel = 0;
2139 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2140 {
2141        /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes: */
2142 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2143 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2144 }
2145 else
2146 {
2147 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2148 /** @todo check this on AMD-V */
2149 pSReg->u64Base = 0;
2150 pSReg->u32Limit = 0;
2151 }
2152}
2153
2154
2155/**
2156 * Loads a segment selector during a task switch in V8086 mode.
2157 *
2158 * @param pSReg Pointer to the segment register.
2159 * @param uSel The selector value to load.
2160 */
2161DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2162{
2163 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2164 pSReg->Sel = uSel;
2165 pSReg->ValidSel = uSel;
2166 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2167 pSReg->u64Base = uSel << 4;
2168 pSReg->u32Limit = 0xffff;
2169 pSReg->Attr.u = 0xf3;
2170}
2171
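/*
 * Worked example of the V8086 rule applied above: the hidden base is simply
 * the selector shifted left by four, so selector 0xb800 yields base 0xb8000,
 * always with a 64KiB limit and DPL-3 read/write data attributes (0xf3).
 */
#if 0
AssertCompile((0xb800 << 4) == 0xb8000);
#endif
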
2172
2173/**
2174 * Loads a segment selector during a task switch in protected mode.
2175 *
2176 * In this task switch scenario, we would throw \#TS exceptions rather than
2177 * \#GPs.
2178 *
2179 * @returns VBox strict status code.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param pSReg Pointer to the segment register.
2182 * @param uSel The new selector value.
2183 *
2184 * @remarks This does _not_ handle CS or SS.
2185 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2186 */
2187static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2188{
2189 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2190
2191 /* Null data selector. */
2192 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2193 {
2194 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2197 return VINF_SUCCESS;
2198 }
2199
2200 /* Fetch the descriptor. */
2201 IEMSELDESC Desc;
2202 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2203 if (rcStrict != VINF_SUCCESS)
2204 {
2205 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2206 VBOXSTRICTRC_VAL(rcStrict)));
2207 return rcStrict;
2208 }
2209
2210 /* Must be a data segment or readable code segment. */
2211 if ( !Desc.Legacy.Gen.u1DescType
2212 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2213 {
2214 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2215 Desc.Legacy.Gen.u4Type));
2216 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2217 }
2218
2219 /* Check privileges for data segments and non-conforming code segments. */
2220 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2221 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2222 {
2223 /* The RPL and the new CPL must be less than or equal to the DPL. */
2224 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2225 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2228 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2229 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2230 }
2231 }
2232
2233 /* Is it there? */
2234 if (!Desc.Legacy.Gen.u1Present)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2237 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2238 }
2239
2240 /* The base and limit. */
2241 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2242 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2243
2244 /*
2245 * Ok, everything checked out fine. Now set the accessed bit before
2246 * committing the result into the registers.
2247 */
2248 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2249 {
2250 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2254 }
2255
2256 /* Commit */
2257 pSReg->Sel = uSel;
2258 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2259 pSReg->u32Limit = cbLimit;
2260 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2261 pSReg->ValidSel = uSel;
2262 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2263 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2264 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2265
2266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2268 return VINF_SUCCESS;
2269}
2270
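/*
 * Worked example of the privilege rule enforced by the helper above: with the
 * new CPL at 3, loading a data selector with RPL 3 that references a DPL 2
 * descriptor fails the RPL <= DPL / CPL <= DPL test, so the load raises #TS
 * with the selector (RPL cleared) as error code rather than the #GP a plain
 * MOV Sreg would give.
 */
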
2271
2272/**
2273 * Performs a task switch.
2274 *
2275 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2276 * caller is responsible for performing the necessary checks (like DPL, TSS
2277 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2278 * reference for JMP, CALL, IRET.
2279 *
2280 * If the task switch is due to a software interrupt or hardware exception,
2281 * the caller is responsible for validating the TSS selector and descriptor. See
2282 * Intel Instruction reference for INT n.
2283 *
2284 * @returns VBox strict status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param enmTaskSwitch The cause of the task switch.
2287 * @param uNextEip The EIP effective after the task switch.
2288 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2289 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2290 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2291 * @param SelTSS The TSS selector of the new task.
2292 * @param pNewDescTSS Pointer to the new TSS descriptor.
2293 */
2294VBOXSTRICTRC
2295iemTaskSwitch(PVMCPUCC pVCpu,
2296 IEMTASKSWITCH enmTaskSwitch,
2297 uint32_t uNextEip,
2298 uint32_t fFlags,
2299 uint16_t uErr,
2300 uint64_t uCr2,
2301 RTSEL SelTSS,
2302 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2303{
2304 Assert(!IEM_IS_REAL_MODE(pVCpu));
2305 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2306 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2307
2308 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2309 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2310 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2311 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2312 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2313
2314 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2315 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2316
2317 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2318 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2319
2320 /* Update CR2 in case it's a page-fault. */
2321 /** @todo This should probably be done much earlier in IEM/PGM. See
2322 * @bugref{5653#c49}. */
2323 if (fFlags & IEM_XCPT_FLAGS_CR2)
2324 pVCpu->cpum.GstCtx.cr2 = uCr2;
2325
2326 /*
2327 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2328 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2329 */
2330 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2331 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2332 if (uNewTSSLimit < uNewTSSLimitMin)
2333 {
2334 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2335 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2336 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2337 }
2338
2339 /*
2340 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2341 * The new TSS must have been read and validated (DPL, limits etc.) before a
2342 * task-switch VM-exit commences.
2343 *
2344 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2345 */
2346 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2347 {
2348 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2349 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2350 }
2351
2352 /*
2353 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2354 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2355 */
2356 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2357 {
2358 uint32_t const uExitInfo1 = SelTSS;
2359 uint32_t uExitInfo2 = uErr;
2360 switch (enmTaskSwitch)
2361 {
2362 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2363 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2364 default: break;
2365 }
2366 if (fFlags & IEM_XCPT_FLAGS_ERR)
2367 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2368 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2369 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2370
2371 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2372 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2373 RT_NOREF2(uExitInfo1, uExitInfo2);
2374 }
2375
2376 /*
2377 * Check the current TSS limit. The last data written to the current TSS during the
2378 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2379 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2380 *
2381 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2382 * end up with smaller than "legal" TSS limits.
2383 */
2384 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2385 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2386 if (uCurTSSLimit < uCurTSSLimitMin)
2387 {
2388 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2389 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2391 }
2392
2393 /*
2394 * Verify that the new TSS can be accessed and map it. Map only the required contents
2395 * and not the entire TSS.
2396 */
2397 void *pvNewTSS;
2398 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2399 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2400 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2401 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2402 * not perform correct translation if this happens. See Intel spec. 7.2.1
2403 * "Task-State Segment". */
2404 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2405 if (rcStrict != VINF_SUCCESS)
2406 {
2407 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2408 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2409 return rcStrict;
2410 }
2411
2412 /*
2413 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2414 */
2415 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2416 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2417 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2418 {
2419 PX86DESC pDescCurTSS;
2420 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2421 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2422 if (rcStrict != VINF_SUCCESS)
2423 {
2424            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2425 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2426 return rcStrict;
2427 }
2428
2429 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2430 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2431 if (rcStrict != VINF_SUCCESS)
2432 {
2433            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2434 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2435 return rcStrict;
2436 }
2437
2438 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2439 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2442 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2443 fEFlags &= ~X86_EFL_NT;
2444 }
2445 }
2446
2447 /*
2448 * Save the CPU state into the current TSS.
2449 */
2450 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2451 if (GCPtrNewTSS == GCPtrCurTSS)
2452 {
2453 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2454 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2455 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2456 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2457 pVCpu->cpum.GstCtx.ldtr.Sel));
2458 }
2459 if (fIsNewTSS386)
2460 {
2461 /*
2462 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2463 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2464 */
2465 void *pvCurTSS32;
2466 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2467 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2468 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2469 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2470 if (rcStrict != VINF_SUCCESS)
2471 {
2472 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2473 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2474 return rcStrict;
2475 }
2476
2477        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2478 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2479 pCurTSS32->eip = uNextEip;
2480 pCurTSS32->eflags = fEFlags;
2481 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2482 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2483 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2484 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2485 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2486 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2487 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2488 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2489 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2490 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2491 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2492 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2493 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2494 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2495
2496 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2497 if (rcStrict != VINF_SUCCESS)
2498 {
2499 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2500 VBOXSTRICTRC_VAL(rcStrict)));
2501 return rcStrict;
2502 }
2503 }
2504 else
2505 {
2506 /*
2507 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2508 */
2509 void *pvCurTSS16;
2510 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2511 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2512 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2513 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2514 if (rcStrict != VINF_SUCCESS)
2515 {
2516 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2517 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2518 return rcStrict;
2519 }
2520
2521        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2522 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2523 pCurTSS16->ip = uNextEip;
2524 pCurTSS16->flags = (uint16_t)fEFlags;
2525 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2526 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2527 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2528 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2529 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2530 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2531 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2532 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2533 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2534 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2535 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2536 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2537
2538 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2539 if (rcStrict != VINF_SUCCESS)
2540 {
2541 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2542 VBOXSTRICTRC_VAL(rcStrict)));
2543 return rcStrict;
2544 }
2545 }
2546
2547 /*
2548 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2549 */
2550 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2551 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2552 {
2553 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2554 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2555 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2556 }
2557
2558 /*
2559 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2560 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2561 */
2562 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2563 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2564 bool fNewDebugTrap;
2565 if (fIsNewTSS386)
2566 {
2567 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2568 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2569 uNewEip = pNewTSS32->eip;
2570 uNewEflags = pNewTSS32->eflags;
2571 uNewEax = pNewTSS32->eax;
2572 uNewEcx = pNewTSS32->ecx;
2573 uNewEdx = pNewTSS32->edx;
2574 uNewEbx = pNewTSS32->ebx;
2575 uNewEsp = pNewTSS32->esp;
2576 uNewEbp = pNewTSS32->ebp;
2577 uNewEsi = pNewTSS32->esi;
2578 uNewEdi = pNewTSS32->edi;
2579 uNewES = pNewTSS32->es;
2580 uNewCS = pNewTSS32->cs;
2581 uNewSS = pNewTSS32->ss;
2582 uNewDS = pNewTSS32->ds;
2583 uNewFS = pNewTSS32->fs;
2584 uNewGS = pNewTSS32->gs;
2585 uNewLdt = pNewTSS32->selLdt;
2586 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2587 }
2588 else
2589 {
2590 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2591 uNewCr3 = 0;
2592 uNewEip = pNewTSS16->ip;
2593 uNewEflags = pNewTSS16->flags;
2594 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2595 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2596 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2597 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2598 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2599 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2600 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2601 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2602 uNewES = pNewTSS16->es;
2603 uNewCS = pNewTSS16->cs;
2604 uNewSS = pNewTSS16->ss;
2605 uNewDS = pNewTSS16->ds;
2606 uNewFS = 0;
2607 uNewGS = 0;
2608 uNewLdt = pNewTSS16->selLdt;
2609 fNewDebugTrap = false;
2610 }
2611
2612 if (GCPtrNewTSS == GCPtrCurTSS)
2613 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2614 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2615
2616 /*
2617 * We're done accessing the new TSS.
2618 */
2619 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2620 if (rcStrict != VINF_SUCCESS)
2621 {
2622 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2623 return rcStrict;
2624 }
2625
2626 /*
2627 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2628 */
2629 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2630 {
2631 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2632 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2636 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* Check that the descriptor indicates the new TSS is available (not busy). */
2641 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2642 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2643 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2644
2645 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2646 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2647 if (rcStrict != VINF_SUCCESS)
2648 {
2649 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2650 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2651 return rcStrict;
2652 }
2653 }
2654
2655 /*
2656 * From this point on, we're technically in the new task. We will defer exceptions
2657 * until the completion of the task switch but before executing any instructions in the new task.
2658 */
2659 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2660 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2661 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2662 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2663 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2664 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2665 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2666
2667 /* Set the busy bit in TR. */
2668 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2669
2670 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2671 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2672 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2673 {
2674 uNewEflags |= X86_EFL_NT;
2675 }
2676
2677 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2678 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2679 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2680
2681 pVCpu->cpum.GstCtx.eip = uNewEip;
2682 pVCpu->cpum.GstCtx.eax = uNewEax;
2683 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2684 pVCpu->cpum.GstCtx.edx = uNewEdx;
2685 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2686 pVCpu->cpum.GstCtx.esp = uNewEsp;
2687 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2688 pVCpu->cpum.GstCtx.esi = uNewEsi;
2689 pVCpu->cpum.GstCtx.edi = uNewEdi;
2690
2691 uNewEflags &= X86_EFL_LIVE_MASK;
2692 uNewEflags |= X86_EFL_RA1_MASK;
2693 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2694
2695 /*
2696 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2697 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2698 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2699 */
2700 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2701 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2704 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2705
2706 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2707 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2708
2709 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2710 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2711
2712 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2713 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2714
2715 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2716 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2717 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2718
2719 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2720 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2721 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2722 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2723
2724 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2725 {
2726 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2727 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2728 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2729 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2730 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2731 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2732 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2733 }
2734
2735 /*
2736 * Switch CR3 for the new task.
2737 */
2738 if ( fIsNewTSS386
2739 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2740 {
2741 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2742 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2743 AssertRCSuccessReturn(rc, rc);
2744
2745 /* Inform PGM. */
2746 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2747 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2748 AssertRCReturn(rc, rc);
2749 /* ignore informational status codes */
2750
2751 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2752 }
2753
2754 /*
2755 * Switch LDTR for the new task.
2756 */
2757 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2758 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2759 else
2760 {
2761 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2762
2763 IEMSELDESC DescNewLdt;
2764 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2765 if (rcStrict != VINF_SUCCESS)
2766 {
2767 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2768 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2769 return rcStrict;
2770 }
2771 if ( !DescNewLdt.Legacy.Gen.u1Present
2772 || DescNewLdt.Legacy.Gen.u1DescType
2773 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2774 {
2775 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2776 uNewLdt, DescNewLdt.Legacy.u));
2777 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2781 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2782 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2783 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2784 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2786 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2787 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2788 }
2789
2790 IEMSELDESC DescSS;
2791 if (IEM_IS_V86_MODE(pVCpu))
2792 {
2793 IEM_SET_CPL(pVCpu, 3);
2794 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2795 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2796 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2797 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2798 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2799 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2800
2801 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2802 DescSS.Legacy.u = 0;
2803 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2804 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2805 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2806 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2807 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2808 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2809 DescSS.Legacy.Gen.u2Dpl = 3;
2810 }
2811 else
2812 {
2813 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2814
2815 /*
2816 * Load the stack segment for the new task.
2817 */
2818 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2819 {
2820 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2822 }
2823
2824 /* Fetch the descriptor. */
2825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2829 VBOXSTRICTRC_VAL(rcStrict)));
2830 return rcStrict;
2831 }
2832
2833 /* SS must be a data segment and writable. */
2834 if ( !DescSS.Legacy.Gen.u1DescType
2835 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2836 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2837 {
2838 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2839 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2841 }
2842
2843 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2844 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2845 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2846 {
2847 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2848 uNewCpl));
2849 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2850 }
2851
2852 /* Is it there? */
2853 if (!DescSS.Legacy.Gen.u1Present)
2854 {
2855 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2856 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2857 }
2858
2859 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2860 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2861
2862 /* Set the accessed bit before committing the result into SS. */
2863 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2864 {
2865 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2866 if (rcStrict != VINF_SUCCESS)
2867 return rcStrict;
2868 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2869 }
2870
2871 /* Commit SS. */
2872 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2873 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2874 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2875 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2876 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2877 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2878 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2879
2880 /* CPL has changed, update IEM before loading rest of segments. */
2881 IEM_SET_CPL(pVCpu, uNewCpl);
2882
2883 /*
2884 * Load the data segments for the new task.
2885 */
2886 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2887 if (rcStrict != VINF_SUCCESS)
2888 return rcStrict;
2889 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2890 if (rcStrict != VINF_SUCCESS)
2891 return rcStrict;
2892 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2893 if (rcStrict != VINF_SUCCESS)
2894 return rcStrict;
2895 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2896 if (rcStrict != VINF_SUCCESS)
2897 return rcStrict;
2898
2899 /*
2900 * Load the code segment for the new task.
2901 */
2902 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2903 {
2904 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2905 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2906 }
2907
2908 /* Fetch the descriptor. */
2909 IEMSELDESC DescCS;
2910 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2911 if (rcStrict != VINF_SUCCESS)
2912 {
2913 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2914 return rcStrict;
2915 }
2916
2917 /* CS must be a code segment. */
2918 if ( !DescCS.Legacy.Gen.u1DescType
2919 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2920 {
2921 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2922 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2923 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2924 }
2925
2926 /* For conforming CS, DPL must be less than or equal to the RPL. */
2927 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2928 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2929 {
2930            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2931 DescCS.Legacy.Gen.u2Dpl));
2932 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2933 }
2934
2935 /* For non-conforming CS, DPL must match RPL. */
2936 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2937 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2938 {
2939            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2940 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 /* Is it there? */
2945 if (!DescCS.Legacy.Gen.u1Present)
2946 {
2947 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2948 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2949 }
2950
2951 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2952 u64Base = X86DESC_BASE(&DescCS.Legacy);
2953
2954 /* Set the accessed bit before committing the result into CS. */
2955 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2956 {
2957 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2958 if (rcStrict != VINF_SUCCESS)
2959 return rcStrict;
2960 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2961 }
2962
2963 /* Commit CS. */
2964 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2965 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2966 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2967 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2968 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2969 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2971 }
2972
2973 /* Make sure the CPU mode is correct. */
2974 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2975 if (fExecNew != pVCpu->iem.s.fExec)
2976 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2977 pVCpu->iem.s.fExec = fExecNew;
2978
2979 /** @todo Debug trap. */
2980 if (fIsNewTSS386 && fNewDebugTrap)
2981 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2982
2983 /*
2984 * Construct the error code masks based on what caused this task switch.
2985 * See Intel Instruction reference for INT.
2986 */
2987 uint16_t uExt;
2988 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2989 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2990 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2991 uExt = 1;
2992 else
2993 uExt = 0;
2994
2995 /*
2996 * Push any error code on to the new stack.
2997 */
2998 if (fFlags & IEM_XCPT_FLAGS_ERR)
2999 {
3000 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3001 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3002 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3003
3004 /* Check that there is sufficient space on the stack. */
3005 /** @todo Factor out segment limit checking for normal/expand down segments
3006 * into a separate function. */
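            /* For a normal segment the valid offsets are [0..limit]; for an
               expand-down segment they are (limit..0xffff], or (limit..0xffffffff]
               when D/B is set.  E.g. an expand-down limit of 0x0fff means ESP
               values 0x1000 thru 0xffff are in bounds, which is why the two
               branches below test the limit in opposite directions. */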
3007 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3008 {
3009 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3010 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3011 {
3012 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3013 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3014 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3015 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3016 }
3017 }
3018 else
3019 {
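 /* For an expand-down stack segment the valid offsets lie strictly above the
    limit, up to 0xffff (B=0) or 0xffffffff (B=1), so the bounds checks below
    are the inverse of the ones used for a normal segment. */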
3020 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3021 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3022 {
3023 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3024 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3025 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3026 }
3027 }
3028
3029
3030 if (fIsNewTSS386)
3031 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3032 else
3033 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3034 if (rcStrict != VINF_SUCCESS)
3035 {
3036 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3037 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3038 return rcStrict;
3039 }
3040 }
3041
3042 /* Check the new EIP against the new CS limit. */
3043 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3044 {
3045 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3046 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3047 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3048 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3049 }
3050
3051 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3052 pVCpu->cpum.GstCtx.ss.Sel));
3053 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3054}
3055
3056
3057/**
3058 * Implements exceptions and interrupts for protected mode.
3059 *
3060 * @returns VBox strict status code.
3061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3062 * @param cbInstr The number of bytes to offset rIP by in the return
3063 * address.
3064 * @param u8Vector The interrupt / exception vector number.
3065 * @param fFlags The flags.
3066 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3067 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3068 */
3069static VBOXSTRICTRC
3070iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3071 uint8_t cbInstr,
3072 uint8_t u8Vector,
3073 uint32_t fFlags,
3074 uint16_t uErr,
3075 uint64_t uCr2) RT_NOEXCEPT
3076{
3077 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3078
3079 /*
3080 * Read the IDT entry.
3081 */
3082 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3083 {
3084 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3085 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3086 }
3087 X86DESC Idte;
3088 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3089 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3091 {
3092 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3093 return rcStrict;
3094 }
3095 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3096 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3097 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3098 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3099
3100 /*
3101 * Check the descriptor type, DPL and such.
3102 * ASSUMES this is done in the same order as described for call-gate calls.
3103 */
3104 if (Idte.Gate.u1DescType)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 bool fTaskGate = false;
3110 uint8_t f32BitGate = true;
3111 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
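 /* Interrupt and trap gates clear TF, NT, RF and VM on entry; interrupt gates
    additionally clear IF (added in the switch below), which is what
    distinguishes them from trap gates. */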
3112 switch (Idte.Gate.u4Type)
3113 {
3114 case X86_SEL_TYPE_SYS_UNDEFINED:
3115 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3116 case X86_SEL_TYPE_SYS_LDT:
3117 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3118 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3119 case X86_SEL_TYPE_SYS_UNDEFINED2:
3120 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3121 case X86_SEL_TYPE_SYS_UNDEFINED3:
3122 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3123 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3124 case X86_SEL_TYPE_SYS_UNDEFINED4:
3125 {
3126 /** @todo check what actually happens when the type is wrong...
3127 * esp. call gates. */
3128 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131
3132 case X86_SEL_TYPE_SYS_286_INT_GATE:
3133 f32BitGate = false;
3134 RT_FALL_THRU();
3135 case X86_SEL_TYPE_SYS_386_INT_GATE:
3136 fEflToClear |= X86_EFL_IF;
3137 break;
3138
3139 case X86_SEL_TYPE_SYS_TASK_GATE:
3140 fTaskGate = true;
3141#ifndef IEM_IMPLEMENTS_TASKSWITCH
3142 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3143#endif
3144 break;
3145
3146 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3147 f32BitGate = false;
 RT_FALL_THRU();
3148 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3149 break;
3150
3151 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3152 }
3153
3154 /* Check DPL against CPL if applicable. */
3155 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3156 {
3157 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3158 {
3159 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162 }
3163
3164 /* Is it there? */
3165 if (!Idte.Gate.u1Present)
3166 {
3167 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3168 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3169 }
3170
3171 /* Is it a task-gate? */
3172 if (fTaskGate)
3173 {
3174 /*
3175 * Construct the error code masks based on what caused this task switch.
3176 * See Intel Instruction reference for INT.
3177 */
3178 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3179 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3180 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3181 RTSEL SelTSS = Idte.Gate.u16Sel;
3182
3183 /*
3184 * Fetch the TSS descriptor in the GDT.
3185 */
3186 IEMSELDESC DescTSS;
3187 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3188 if (rcStrict != VINF_SUCCESS)
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3191 VBOXSTRICTRC_VAL(rcStrict)));
3192 return rcStrict;
3193 }
3194
3195 /* The TSS descriptor must be a system segment and be available (not busy). */
3196 if ( DescTSS.Legacy.Gen.u1DescType
3197 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3198 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3199 {
3200 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3201 u8Vector, SelTSS, DescTSS.Legacy.au64));
3202 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3203 }
3204
3205 /* The TSS must be present. */
3206 if (!DescTSS.Legacy.Gen.u1Present)
3207 {
3208 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3209 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3210 }
3211
3212 /* Do the actual task switch. */
3213 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3214 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3215 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3216 }
3217
3218 /* A null CS is bad. */
3219 RTSEL NewCS = Idte.Gate.u16Sel;
3220 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3223 return iemRaiseGeneralProtectionFault0(pVCpu);
3224 }
3225
3226 /* Fetch the descriptor for the new CS. */
3227 IEMSELDESC DescCS;
3228 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3229 if (rcStrict != VINF_SUCCESS)
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3232 return rcStrict;
3233 }
3234
3235 /* Must be a code segment. */
3236 if (!DescCS.Legacy.Gen.u1DescType)
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3242 {
3243 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3244 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3245 }
3246
3247 /* Don't allow lowering the privilege level. */
3248 /** @todo Does the lowering of privileges apply to software interrupts
3249 * only? This has bearings on the more-privileged or
3250 * same-privilege stack behavior further down. A testcase would
3251 * be nice. */
3252 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3253 {
3254 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3255 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3257 }
3258
3259 /* Make sure the selector is present. */
3260 if (!DescCS.Legacy.Gen.u1Present)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3263 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3264 }
3265
3266#ifdef LOG_ENABLED
3267 /* If software interrupt, try decode it if logging is enabled and such. */
3268 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3269 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3270 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3271#endif
3272
3273 /* Check the new EIP against the new CS limit. */
3274 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3275 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3276 ? Idte.Gate.u16OffsetLow
3277 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
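 /* A 286 gate only carries a 16-bit offset, so the high offset word is
    ignored for those. */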
3278 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3279 if (uNewEip > cbLimitCS)
3280 {
3281 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3282 u8Vector, uNewEip, cbLimitCS, NewCS));
3283 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3284 }
3285 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3286
3287 /* Calc the flag image to push. */
3288 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3289 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3290 fEfl &= ~X86_EFL_RF;
3291 else
3292 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3293
3294 /* From V8086 mode only go to CPL 0. */
3295 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3296 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3297 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3298 {
3299 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3300 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3301 }
3302
3303 /*
3304 * If the privilege level changes, we need to get a new stack from the TSS.
3305 * This in turns means validating the new SS and ESP...
3306 */
3307 if (uNewCpl != IEM_GET_CPL(pVCpu))
3308 {
3309 RTSEL NewSS;
3310 uint32_t uNewEsp;
3311 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3312 if (rcStrict != VINF_SUCCESS)
3313 return rcStrict;
3314
3315 IEMSELDESC DescSS;
3316 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3317 if (rcStrict != VINF_SUCCESS)
3318 return rcStrict;
3319 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3320 if (!DescSS.Legacy.Gen.u1DefBig)
3321 {
3322 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3323 uNewEsp = (uint16_t)uNewEsp;
3324 }
3325
3326 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3327
3328 /* Check that there is sufficient space for the stack frame. */
3329 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3330 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3331 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3332 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
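 /* E.g. a 32-bit gate with an error code needs 24 bytes outside V8086 mode
    (ERR, EIP, CS, EFLAGS, ESP, SS) and 40 bytes when interrupting V8086 code,
    where ES, DS, FS and GS are pushed as well; 16-bit gates use half of that. */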
3333
3334 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3335 {
3336 if ( uNewEsp - 1 > cbLimitSS
3337 || uNewEsp < cbStackFrame)
3338 {
3339 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3340 u8Vector, NewSS, uNewEsp, cbStackFrame));
3341 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3342 }
3343 }
3344 else
3345 {
3346 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3347 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3348 {
3349 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3350 u8Vector, NewSS, uNewEsp, cbStackFrame));
3351 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3352 }
3353 }
3354
3355 /*
3356 * Start making changes.
3357 */
3358
3359 /* Set the new CPL so that stack accesses use it. */
3360 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3361 IEM_SET_CPL(pVCpu, uNewCpl);
3362
3363 /* Create the stack frame. */
3364 RTPTRUNION uStackFrame;
3365 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3366 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3367 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3368 if (rcStrict != VINF_SUCCESS)
3369 return rcStrict;
3370 void * const pvStackFrame = uStackFrame.pv;
3371 if (f32BitGate)
3372 {
3373 if (fFlags & IEM_XCPT_FLAGS_ERR)
3374 *uStackFrame.pu32++ = uErr;
3375 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3376 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3377 uStackFrame.pu32[2] = fEfl;
3378 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3379 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3380 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3381 if (fEfl & X86_EFL_VM)
3382 {
3383 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3384 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3385 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3386 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3387 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3388 }
3389 }
3390 else
3391 {
3392 if (fFlags & IEM_XCPT_FLAGS_ERR)
3393 *uStackFrame.pu16++ = uErr;
3394 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3395 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3396 uStackFrame.pu16[2] = fEfl;
3397 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3398 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3399 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3400 if (fEfl & X86_EFL_VM)
3401 {
3402 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3403 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3404 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3405 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3406 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3407 }
3408 }
3409 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3410 if (rcStrict != VINF_SUCCESS)
3411 return rcStrict;
3412
3413 /* Mark the selectors 'accessed' (hope this is the correct time). */
3414 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3415 * after pushing the stack frame? (Write protect the gdt + stack to
3416 * find out.) */
3417 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3418 {
3419 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
3422 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3423 }
3424
3425 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3426 {
3427 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3431 }
3432
3433 /*
3434 * Start committing the register changes (joins with the DPL=CPL branch).
3435 */
3436 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3437 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3438 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3439 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3440 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3441 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3442 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3443 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3444 * SP is loaded).
3445 * Need to check the other combinations too:
3446 * - 16-bit TSS, 32-bit handler
3447 * - 32-bit TSS, 16-bit handler */
3448 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3449 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3450 else
3451 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3452
3453 if (fEfl & X86_EFL_VM)
3454 {
3455 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3456 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3457 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3458 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3459 }
3460 }
3461 /*
3462 * Same privilege, no stack change and smaller stack frame.
3463 */
3464 else
3465 {
3466 uint64_t uNewRsp;
3467 RTPTRUNION uStackFrame;
3468 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
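 /* Here the frame is just (ERR,) EIP/IP, CS and EFLAGS/FLAGS: 12 or 16 bytes
    for a 32-bit gate, 6 or 8 bytes for a 16-bit one. */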
3469 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3470 if (rcStrict != VINF_SUCCESS)
3471 return rcStrict;
3472 void * const pvStackFrame = uStackFrame.pv;
3473
3474 if (f32BitGate)
3475 {
3476 if (fFlags & IEM_XCPT_FLAGS_ERR)
3477 *uStackFrame.pu32++ = uErr;
3478 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3479 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3480 uStackFrame.pu32[2] = fEfl;
3481 }
3482 else
3483 {
3484 if (fFlags & IEM_XCPT_FLAGS_ERR)
3485 *uStackFrame.pu16++ = uErr;
3486 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3487 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3488 uStackFrame.pu16[2] = fEfl;
3489 }
3490 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493
3494 /* Mark the CS selector as 'accessed'. */
3495 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3496 {
3497 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3501 }
3502
3503 /*
3504 * Start committing the register changes (joins with the other branch).
3505 */
3506 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3507 }
3508
3509 /* ... register committing continues. */
3510 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3511 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3512 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3513 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3514 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3515 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3516
3517 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3518 fEfl &= ~fEflToClear;
3519 IEMMISC_SET_EFL(pVCpu, fEfl);
3520
3521 if (fFlags & IEM_XCPT_FLAGS_CR2)
3522 pVCpu->cpum.GstCtx.cr2 = uCr2;
3523
3524 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3525 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3526
3527 /* Make sure the execution flags are correct. */
3528 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3529 if (fExecNew != pVCpu->iem.s.fExec)
3530 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3531 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3532 pVCpu->iem.s.fExec = fExecNew;
3533 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3534
3535 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3536}
3537
3538
3539/**
3540 * Implements exceptions and interrupts for long mode.
3541 *
3542 * @returns VBox strict status code.
3543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3544 * @param cbInstr The number of bytes to offset rIP by in the return
3545 * address.
3546 * @param u8Vector The interrupt / exception vector number.
3547 * @param fFlags The flags.
3548 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3549 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3550 */
3551static VBOXSTRICTRC
3552iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3553 uint8_t cbInstr,
3554 uint8_t u8Vector,
3555 uint32_t fFlags,
3556 uint16_t uErr,
3557 uint64_t uCr2) RT_NOEXCEPT
3558{
3559 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3560
3561 /*
3562 * Read the IDT entry.
3563 */
3564 uint16_t offIdt = (uint16_t)u8Vector << 4;
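 /* Long-mode IDT entries are 16 bytes each (the offset grows to 64 bits and
    an IST field is added), hence the vector << 4 above and the two 8-byte
    system reads below. */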
3565 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3566 {
3567 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3568 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3569 }
3570 X86DESC64 Idte;
3571#ifdef _MSC_VER /* Shut up silly compiler warning. */
3572 Idte.au64[0] = 0;
3573 Idte.au64[1] = 0;
3574#endif
3575 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3576 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3577 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3578 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3579 {
3580 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3581 return rcStrict;
3582 }
3583 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3584 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3585 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3586
3587 /*
3588 * Check the descriptor type, DPL and such.
3589 * ASSUMES this is done in the same order as described for call-gate calls.
3590 */
3591 if (Idte.Gate.u1DescType)
3592 {
3593 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3594 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3595 }
3596 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3597 switch (Idte.Gate.u4Type)
3598 {
3599 case AMD64_SEL_TYPE_SYS_INT_GATE:
3600 fEflToClear |= X86_EFL_IF;
3601 break;
3602 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3603 break;
3604
3605 default:
3606 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3607 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3608 }
3609
3610 /* Check DPL against CPL if applicable. */
3611 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3612 {
3613 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 }
3619
3620 /* Is it there? */
3621 if (!Idte.Gate.u1Present)
3622 {
3623 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3624 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3625 }
3626
3627 /* A null CS is bad. */
3628 RTSEL NewCS = Idte.Gate.u16Sel;
3629 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3630 {
3631 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3632 return iemRaiseGeneralProtectionFault0(pVCpu);
3633 }
3634
3635 /* Fetch the descriptor for the new CS. */
3636 IEMSELDESC DescCS;
3637 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3638 if (rcStrict != VINF_SUCCESS)
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3641 return rcStrict;
3642 }
3643
3644 /* Must be a 64-bit code segment. */
3645 if (!DescCS.Long.Gen.u1DescType)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3648 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3649 }
3650 if ( !DescCS.Long.Gen.u1Long
3651 || DescCS.Long.Gen.u1DefBig
3652 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3653 {
3654 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3655 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3656 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3657 }
3658
3659 /* Don't allow lowering the privilege level. For non-conforming CS
3660 selectors, the CS.DPL sets the privilege level the trap/interrupt
3661 handler runs at. For conforming CS selectors, the CPL remains
3662 unchanged, but the CS.DPL must be <= CPL. */
3663 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3664 * when CPU in Ring-0. Result \#GP? */
3665 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3666 {
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3668 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3669 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3670 }
3671
3672
3673 /* Make sure the selector is present. */
3674 if (!DescCS.Legacy.Gen.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3677 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3678 }
3679
3680 /* Check that the new RIP is canonical. */
3681 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3682 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3683 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3684 if (!IEM_IS_CANONICAL(uNewRip))
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3687 return iemRaiseGeneralProtectionFault0(pVCpu);
3688 }
3689
3690 /*
3691 * If the privilege level changes or if the IST isn't zero, we need to get
3692 * a new stack from the TSS.
3693 */
3694 uint64_t uNewRsp;
3695 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3696 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3697 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3698 || Idte.Gate.u3IST != 0)
3699 {
3700 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3701 if (rcStrict != VINF_SUCCESS)
3702 return rcStrict;
3703 }
3704 else
3705 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3706 uNewRsp &= ~(uint64_t)0xf;
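 /* The new stack pointer is aligned down to a 16-byte boundary before the
    frame is pushed, as 64-bit mode interrupt delivery does on real hardware,
    whether it came from the TSS/IST or is the current RSP. */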
3707
3708 /*
3709 * Calc the flag image to push.
3710 */
3711 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3712 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3713 fEfl &= ~X86_EFL_RF;
3714 else
3715 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3716
3717 /*
3718 * Start making changes.
3719 */
3720 /* Set the new CPL so that stack accesses use it. */
3721 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3722 IEM_SET_CPL(pVCpu, uNewCpl);
3723/** @todo Setting CPL this early seems wrong as it would affect any errors we
3724 * raise accessing the stack and (?) GDT/LDT... */
3725
3726 /* Create the stack frame. */
3727 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
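 /* The 64-bit frame is always five qwords - RIP, CS, RFLAGS, RSP and SS -
    plus an optional error code, i.e. 40 or 48 bytes. */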
3728 RTPTRUNION uStackFrame;
3729 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3730 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3731 if (rcStrict != VINF_SUCCESS)
3732 return rcStrict;
3733 void * const pvStackFrame = uStackFrame.pv;
3734
3735 if (fFlags & IEM_XCPT_FLAGS_ERR)
3736 *uStackFrame.pu64++ = uErr;
3737 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3738 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3739 uStackFrame.pu64[2] = fEfl;
3740 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3741 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3742 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3743 if (rcStrict != VINF_SUCCESS)
3744 return rcStrict;
3745
3746 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3747 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3748 * after pushing the stack frame? (Write protect the gdt + stack to
3749 * find out.) */
3750 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3751 {
3752 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3756 }
3757
3758 /*
3759 * Start committing the register changes.
3760 */
3761 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3762 * hidden registers when interrupting 32-bit or 16-bit code! */
3763 if (uNewCpl != uOldCpl)
3764 {
3765 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3766 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3767 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3768 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3769 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3770 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3771 }
3772 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3773 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3774 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3775 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3776 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3777 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3778 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3779 pVCpu->cpum.GstCtx.rip = uNewRip;
3780
3781 fEfl &= ~fEflToClear;
3782 IEMMISC_SET_EFL(pVCpu, fEfl);
3783
3784 if (fFlags & IEM_XCPT_FLAGS_CR2)
3785 pVCpu->cpum.GstCtx.cr2 = uCr2;
3786
3787 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3788 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3789
3790 iemRecalcExecModeAndCplFlags(pVCpu);
3791
3792 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3793}
3794
3795
3796/**
3797 * Implements exceptions and interrupts.
3798 *
3799 * All exceptions and interrupts go thru this function!
3800 *
3801 * @returns VBox strict status code.
3802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3803 * @param cbInstr The number of bytes to offset rIP by in the return
3804 * address.
3805 * @param u8Vector The interrupt / exception vector number.
3806 * @param fFlags The flags.
3807 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3808 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3809 */
3810VBOXSTRICTRC
3811iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3812 uint8_t cbInstr,
3813 uint8_t u8Vector,
3814 uint32_t fFlags,
3815 uint16_t uErr,
3816 uint64_t uCr2) RT_NOEXCEPT
3817{
3818 /*
3819 * Get all the state that we might need here.
3820 */
3821 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3822 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3823
3824#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3825 /*
3826 * Flush prefetch buffer
3827 */
3828 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3829#endif
3830
3831 /*
3832 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3833 */
3834 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3835 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3836 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3837 | IEM_XCPT_FLAGS_BP_INSTR
3838 | IEM_XCPT_FLAGS_ICEBP_INSTR
3839 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3840 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3841 {
3842 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3843 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3844 u8Vector = X86_XCPT_GP;
3845 uErr = 0;
3846 }
3847#ifdef DBGFTRACE_ENABLED
3848 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3849 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3850 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3851#endif
3852
3853 /*
3854 * Evaluate whether NMI blocking should be in effect.
3855 * Normally, NMI blocking is in effect whenever we inject an NMI.
3856 */
3857 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3858 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3859
3860#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3861 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3862 {
3863 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3864 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3865 return rcStrict0;
3866
3867 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3868 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3869 {
3870 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3871 fBlockNmi = false;
3872 }
3873 }
3874#endif
3875
3876#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3877 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3878 {
3879 /*
3880 * If the event is being injected as part of VMRUN, it isn't subject to event
3881 * intercepts in the nested-guest. However, secondary exceptions that occur
3882 * during injection of any event -are- subject to exception intercepts.
3883 *
3884 * See AMD spec. 15.20 "Event Injection".
3885 */
3886 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3887 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3888 else
3889 {
3890 /*
3891 * Check and handle if the event being raised is intercepted.
3892 */
3893 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3894 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3895 return rcStrict0;
3896 }
3897 }
3898#endif
3899
3900 /*
3901 * Set NMI blocking if necessary.
3902 */
3903 if (fBlockNmi)
3904 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3905
3906 /*
3907 * Do recursion accounting.
3908 */
3909 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3910 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3911 if (pVCpu->iem.s.cXcptRecursions == 0)
3912 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3913 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3914 else
3915 {
3916 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3917 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3918 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3919
3920 if (pVCpu->iem.s.cXcptRecursions >= 4)
3921 {
3922#ifdef DEBUG_bird
3923 AssertFailed();
3924#endif
3925 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3926 }
3927
3928 /*
3929 * Evaluate the sequence of recurring events.
3930 */
3931 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3932 NULL /* pXcptRaiseInfo */);
3933 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3934 { /* likely */ }
3935 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3936 {
3937 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3938 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3939 u8Vector = X86_XCPT_DF;
3940 uErr = 0;
3941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3942 /* VMX nested-guest #DF intercept needs to be checked here. */
3943 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3944 {
3945 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3946 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3947 return rcStrict0;
3948 }
3949#endif
3950 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3951 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3952 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3953 }
3954 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3955 {
3956 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3957 return iemInitiateCpuShutdown(pVCpu);
3958 }
3959 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3960 {
3961 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3962 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3963 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3964 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3965 return VERR_EM_GUEST_CPU_HANG;
3966 }
3967 else
3968 {
3969 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3970 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3971 return VERR_IEM_IPE_9;
3972 }
3973
3974 /*
3975 * The 'EXT' bit is set when an exception occurs during delivery of an external
3976 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3977 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3978 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3979 *
3980 * [1] - Intel spec. 6.13 "Error Code"
3981 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3982 * [3] - Intel Instruction reference for INT n.
3983 */
3984 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3985 && (fFlags & IEM_XCPT_FLAGS_ERR)
3986 && u8Vector != X86_XCPT_PF
3987 && u8Vector != X86_XCPT_DF)
3988 {
3989 uErr |= X86_TRAP_ERR_EXTERNAL;
3990 }
3991 }
3992
3993 pVCpu->iem.s.cXcptRecursions++;
3994 pVCpu->iem.s.uCurXcpt = u8Vector;
3995 pVCpu->iem.s.fCurXcpt = fFlags;
3996 pVCpu->iem.s.uCurXcptErr = uErr;
3997 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3998
3999 /*
4000 * Extensive logging.
4001 */
4002#if defined(LOG_ENABLED) && defined(IN_RING3)
4003 if (LogIs3Enabled())
4004 {
4005 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4006 PVM pVM = pVCpu->CTX_SUFF(pVM);
4007 char szRegs[4096];
4008 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4009 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4010 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4011 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4012 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4013 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4014 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4015 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4016 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4017 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4018 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4019 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4020 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4021 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4022 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4023 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4024 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4025 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4026 " efer=%016VR{efer}\n"
4027 " pat=%016VR{pat}\n"
4028 " sf_mask=%016VR{sf_mask}\n"
4029 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4030 " lstar=%016VR{lstar}\n"
4031 " star=%016VR{star} cstar=%016VR{cstar}\n"
4032 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4033 );
4034
4035 char szInstr[256];
4036 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4037 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4038 szInstr, sizeof(szInstr), NULL);
4039 Log3(("%s%s\n", szRegs, szInstr));
4040 }
4041#endif /* LOG_ENABLED */
4042
4043 /*
4044 * Stats.
4045 */
4046 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4047 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4048 else if (u8Vector <= X86_XCPT_LAST)
4049 {
4050 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4051 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4052 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4053 }
4054
4055 /*
4056 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4057 * to ensure that a stale TLB or paging cache entry will only cause one
4058 * spurious #PF.
4059 */
4060 if ( u8Vector == X86_XCPT_PF
4061 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4062 IEMTlbInvalidatePage(pVCpu, uCr2);
4063
4064 /*
4065 * Call the mode specific worker function.
4066 */
4067 VBOXSTRICTRC rcStrict;
4068 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4069 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4070 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4071 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4072 else
4073 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4074
4075 /* Flush the prefetch buffer. */
4076 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4077
4078 /*
4079 * Unwind.
4080 */
4081 pVCpu->iem.s.cXcptRecursions--;
4082 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4083 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4084 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4085 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4086 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4087 return rcStrict;
4088}
4089
4090#ifdef IEM_WITH_SETJMP
4091/**
4092 * See iemRaiseXcptOrInt. Will not return.
4093 */
4094DECL_NO_RETURN(void)
4095iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4096 uint8_t cbInstr,
4097 uint8_t u8Vector,
4098 uint32_t fFlags,
4099 uint16_t uErr,
4100 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4101{
4102 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4103 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4104}
4105#endif
4106
4107
4108/** \#DE - 00. */
4109VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4110{
4111 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4112}
4113
4114
4115/** \#DB - 01.
4116 * @note This automatically clears DR7.GD. */
4117VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4118{
4119 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4120 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4121 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4122}
4123
4124
4125/** \#BR - 05. */
4126VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4127{
4128 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4129}
4130
4131
4132/** \#UD - 06. */
4133VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4134{
4135 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4136}
4137
4138
4139/** \#NM - 07. */
4140VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4141{
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4143}
4144
4145
4146/** \#TS(err) - 0a. */
4147VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4148{
4149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4150}
4151
4152
4153/** \#TS(tr) - 0a. */
4154VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4155{
4156 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 pVCpu->cpum.GstCtx.tr.Sel, 0);
4158}
4159
4160
4161/** \#TS(0) - 0a. */
4162VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 0, 0);
4166}
4167
4168
4169/** \#TS(err) - 0a. */
4170VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & X86_SEL_MASK_OFF_RPL, 0);
4174}
4175
4176
4177/** \#NP(err) - 0b. */
4178VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4181}
4182
4183
4184/** \#NP(sel) - 0b. */
4185VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4186{
4187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4188 uSel & ~X86_SEL_RPL, 0);
4189}
4190
4191
4192/** \#SS(seg) - 0c. */
4193VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4196 uSel & ~X86_SEL_RPL, 0);
4197}
4198
4199
4200/** \#SS(err) - 0c. */
4201VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4204}
4205
4206
4207/** \#GP(n) - 0d. */
4208VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4209{
4210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#GP(0) - 0d. */
4222DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#GP(sel) - 0d. */
4230VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4233 Sel & ~X86_SEL_RPL, 0);
4234}
4235
4236
4237/** \#GP(0) - 0d. */
4238VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4239{
4240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4241}
4242
4243
4244/** \#GP(sel) - 0d. */
4245VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4246{
4247 NOREF(iSegReg); NOREF(fAccess);
4248 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4249 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4250}
4251
4252#ifdef IEM_WITH_SETJMP
4253/** \#GP(sel) - 0d, longjmp. */
4254DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4255{
4256 NOREF(iSegReg); NOREF(fAccess);
4257 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4258 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4259}
4260#endif
4261
4262/** \#GP(sel) - 0d. */
4263VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4264{
4265 NOREF(Sel);
4266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4267}
4268
4269#ifdef IEM_WITH_SETJMP
4270/** \#GP(sel) - 0d, longjmp. */
4271DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4272{
4273 NOREF(Sel);
4274 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4275}
4276#endif
4277
4278
4279/** \#GP(sel) - 0d. */
4280VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4281{
4282 NOREF(iSegReg); NOREF(fAccess);
4283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4284}
4285
4286#ifdef IEM_WITH_SETJMP
4287/** \#GP(sel) - 0d, longjmp. */
4288DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4289{
4290 NOREF(iSegReg); NOREF(fAccess);
4291 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4292}
4293#endif
4294
4295
4296/** \#PF(n) - 0e. */
4297VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4298{
4299 uint16_t uErr;
4300 switch (rc)
4301 {
4302 case VERR_PAGE_NOT_PRESENT:
4303 case VERR_PAGE_TABLE_NOT_PRESENT:
4304 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4305 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4306 uErr = 0;
4307 break;
4308
4309 default:
4310 AssertMsgFailed(("%Rrc\n", rc));
4311 RT_FALL_THRU();
4312 case VERR_ACCESS_DENIED:
4313 uErr = X86_TRAP_PF_P;
4314 break;
4315
4316 /** @todo reserved */
4317 }
4318
4319 if (IEM_GET_CPL(pVCpu) == 3)
4320 uErr |= X86_TRAP_PF_US;
4321
4322 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4323 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4324 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4325 uErr |= X86_TRAP_PF_ID;
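 /* The instruction-fetch (I/D) bit is only reported when NX paging is in
    effect, hence the PAE + EFER.NXE check above. (The SDM also defines it
    when CR4.SMEP is set; that case isn't considered here.) */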
4326
4327#if 0 /* This is so much non-sense, really. Why was it done like that? */
4328 /* Note! RW access callers reporting a WRITE protection fault will clear
4329 the READ flag before calling. So, read-modify-write accesses (RW)
4330 can safely be reported as READ faults. */
4331 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4332 uErr |= X86_TRAP_PF_RW;
4333#else
4334 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4335 {
4336 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4337 /// (regardless of outcome of the comparison in the latter case).
4338 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4339 uErr |= X86_TRAP_PF_RW;
4340 }
4341#endif
4342
4343 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4344 of the memory operand rather than at the start of it. (Not sure what
4345 happens if it crosses a page boundary.) The current heuristic for
4346 this is to report the #PF for the last byte if the access is more than
4347 64 bytes. This is probably not correct, but we can work that out later;
4348 the main objective now is to get FXSAVE to work like on real hardware and
4349 make bs3-cpu-basic2 work. */
4350 if (cbAccess <= 64)
4351 { /* likely*/ }
4352 else
4353 GCPtrWhere += cbAccess - 1;
4354
4355 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4356 uErr, GCPtrWhere);
4357}
4358
4359#ifdef IEM_WITH_SETJMP
4360/** \#PF(n) - 0e, longjmp. */
4361DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4362 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4363{
4364 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4365}
4366#endif
4367
4368
4369/** \#MF(0) - 10. */
4370VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4371{
4372 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4373 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4374
4375 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4376 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4377 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4378}
4379
4380
4381/** \#AC(0) - 11. */
4382VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4383{
4384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4385}
4386
4387#ifdef IEM_WITH_SETJMP
4388/** \#AC(0) - 11, longjmp. */
4389DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4390{
4391 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4392}
4393#endif
4394
4395
4396/** \#XF(0)/\#XM(0) - 19. */
4397VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4398{
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4400}
4401
4402
4403/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4404IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4405{
4406 NOREF(cbInstr);
4407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4408}
4409
4410
4411/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4412IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4413{
4414 NOREF(cbInstr);
4415 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4416}
4417
4418
4419/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4420IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4421{
4422 NOREF(cbInstr);
4423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4424}
4425
4426
4427/** @} */
4428
4429/** @name Common opcode decoders.
4430 * @{
4431 */
4432//#include <iprt/mem.h>
4433
4434/**
4435 * Used to add extra details about a stub case.
4436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4437 */
4438void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4439{
4440#if defined(LOG_ENABLED) && defined(IN_RING3)
4441 PVM pVM = pVCpu->CTX_SUFF(pVM);
4442 char szRegs[4096];
4443 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4444 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4445 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4446 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4447 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4448 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4449 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4450 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4451 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4452 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4453 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4454 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4455 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4456 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4457 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4458 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4459 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4460 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4461 " efer=%016VR{efer}\n"
4462 " pat=%016VR{pat}\n"
4463 " sf_mask=%016VR{sf_mask}\n"
4464 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4465 " lstar=%016VR{lstar}\n"
4466 " star=%016VR{star} cstar=%016VR{cstar}\n"
4467 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4468 );
4469
4470 char szInstr[256];
4471 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4472 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4473 szInstr, sizeof(szInstr), NULL);
4474
4475 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4476#else
4477 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4478#endif
4479}
4480
4481/** @} */
4482
4483
4484
4485/** @name Register Access.
4486 * @{
4487 */
4488
4489/**
4490 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4491 *
4492 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4493 * segment limit.
4494 *
4495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4496 * @param cbInstr Instruction size.
4497 * @param offNextInstr The offset of the next instruction.
4498 * @param enmEffOpSize Effective operand size.
4499 */
4500VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4501 IEMMODE enmEffOpSize) RT_NOEXCEPT
4502{
4503 switch (enmEffOpSize)
4504 {
4505 case IEMMODE_16BIT:
4506 {
4507 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4508 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4509 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4510 pVCpu->cpum.GstCtx.rip = uNewIp;
4511 else
4512 return iemRaiseGeneralProtectionFault0(pVCpu);
4513 break;
4514 }
4515
4516 case IEMMODE_32BIT:
4517 {
4518 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4519 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4520
4521 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4522 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4523 pVCpu->cpum.GstCtx.rip = uNewEip;
4524 else
4525 return iemRaiseGeneralProtectionFault0(pVCpu);
4526 break;
4527 }
4528
4529 case IEMMODE_64BIT:
4530 {
4531 Assert(IEM_IS_64BIT_CODE(pVCpu));
4532
4533 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4534 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4535 pVCpu->cpum.GstCtx.rip = uNewRip;
4536 else
4537 return iemRaiseGeneralProtectionFault0(pVCpu);
4538 break;
4539 }
4540
4541 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4542 }
4543
4544#ifndef IEM_WITH_CODE_TLB
4545 /* Flush the prefetch buffer. */
4546 pVCpu->iem.s.cbOpcode = cbInstr;
4547#endif
4548
4549 /*
4550 * Clear RF and finish the instruction (maybe raise #DB).
4551 */
4552 return iemRegFinishClearingRF(pVCpu);
4553}
4554
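/*
 * A minimal standalone sketch of the relative-jump target calculation above,
 * for illustration only (not part of the build).  It simplifies things by
 * ignoring the 64-bit-mode case where the CS limit check is skipped for the
 * 16-bit operand size; the helper names below are hypothetical and plain
 * integers stand in for the guest context.
 *
 *   @code
 *   #include <stdint.h>
 *   #include <stdbool.h>
 *
 *   static bool ExampleIsCanonical(uint64_t uAddr)
 *   {
 *       // Bits 63:47 must all be equal (48-bit linear addresses).
 *       return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
 *   }
 *
 *   // Returns true and the new RIP on success, false when a #GP(0) would be raised.
 *   static bool ExampleRelJmpS8(uint64_t uRipOld, uint8_t cbInstr, int8_t offNext,
 *                               bool f64Bit, uint32_t cbCsLimit, uint64_t *puRipNew)
 *   {
 *       if (f64Bit)
 *       {
 *           uint64_t const uNewRip = uRipOld + cbInstr + (int64_t)offNext;
 *           if (!ExampleIsCanonical(uNewRip))
 *               return false;               // non-canonical target
 *           *puRipNew = uNewRip;
 *       }
 *       else
 *       {
 *           // 16-bit: the target wraps at 64 KB and must be within the CS limit.
 *           uint16_t const uNewIp = (uint16_t)(uRipOld + cbInstr + (int16_t)offNext);
 *           if (uNewIp > cbCsLimit)
 *               return false;               // beyond the code segment limit
 *           *puRipNew = uNewIp;
 *       }
 *       return true;
 *   }
 *   @endcode
 */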
4555
4556/**
4557 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4558 *
4559 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4560 * segment limit.
4561 *
4562 * @returns Strict VBox status code.
4563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4564 * @param cbInstr Instruction size.
4565 * @param offNextInstr The offset of the next instruction.
4566 */
4567VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4568{
4569 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4570
4571 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4572 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4573 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4574 pVCpu->cpum.GstCtx.rip = uNewIp;
4575 else
4576 return iemRaiseGeneralProtectionFault0(pVCpu);
4577
4578#ifndef IEM_WITH_CODE_TLB
4579 /* Flush the prefetch buffer. */
4580 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4581#endif
4582
4583 /*
4584 * Clear RF and finish the instruction (maybe raise #DB).
4585 */
4586 return iemRegFinishClearingRF(pVCpu);
4587}
4588
4589
4590/**
4591 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4592 *
4593 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4594 * segment limit.
4595 *
4596 * @returns Strict VBox status code.
4597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4598 * @param cbInstr Instruction size.
4599 * @param offNextInstr The offset of the next instruction.
4600 * @param enmEffOpSize Effective operand size.
4601 */
4602VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4603 IEMMODE enmEffOpSize) RT_NOEXCEPT
4604{
4605 if (enmEffOpSize == IEMMODE_32BIT)
4606 {
4607 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4608
4609 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4610 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4611 pVCpu->cpum.GstCtx.rip = uNewEip;
4612 else
4613 return iemRaiseGeneralProtectionFault0(pVCpu);
4614 }
4615 else
4616 {
4617 Assert(enmEffOpSize == IEMMODE_64BIT);
4618
4619 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4620 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4621 pVCpu->cpum.GstCtx.rip = uNewRip;
4622 else
4623 return iemRaiseGeneralProtectionFault0(pVCpu);
4624 }
4625
4626#ifndef IEM_WITH_CODE_TLB
4627 /* Flush the prefetch buffer. */
4628 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4629#endif
4630
4631 /*
4632 * Clear RF and finish the instruction (maybe raise #DB).
4633 */
4634 return iemRegFinishClearingRF(pVCpu);
4635}
4636
4637
4638/**
4639 * Performs a near jump to the specified address.
4640 *
4641 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param uNewIp The new IP value.
4645 */
4646VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4647{
4648 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4649 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4650 pVCpu->cpum.GstCtx.rip = uNewIp;
4651 else
4652 return iemRaiseGeneralProtectionFault0(pVCpu);
4653 /** @todo Test 16-bit jump in 64-bit mode. */
4654
4655#ifndef IEM_WITH_CODE_TLB
4656 /* Flush the prefetch buffer. */
4657 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4658#endif
4659
4660 /*
4661 * Clear RF and finish the instruction (maybe raise #DB).
4662 */
4663 return iemRegFinishClearingRF(pVCpu);
4664}
4665
4666
4667/**
4668 * Performs a near jump to the specified address.
4669 *
4670 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4671 *
4672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4673 * @param uNewEip The new EIP value.
4674 */
4675VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4676{
4677 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4678 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4679
4680 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4681 pVCpu->cpum.GstCtx.rip = uNewEip;
4682 else
4683 return iemRaiseGeneralProtectionFault0(pVCpu);
4684
4685#ifndef IEM_WITH_CODE_TLB
4686 /* Flush the prefetch buffer. */
4687 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4688#endif
4689
4690 /*
4691 * Clear RF and finish the instruction (maybe raise #DB).
4692 */
4693 return iemRegFinishClearingRF(pVCpu);
4694}
4695
4696
4697/**
4698 * Performs a near jump to the specified address.
4699 *
4700 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4701 * segment limit.
4702 *
4703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4704 * @param uNewRip The new RIP value.
4705 */
4706VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4707{
4708 Assert(IEM_IS_64BIT_CODE(pVCpu));
4709
4710 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4711 pVCpu->cpum.GstCtx.rip = uNewRip;
4712 else
4713 return iemRaiseGeneralProtectionFault0(pVCpu);
4714
4715#ifndef IEM_WITH_CODE_TLB
4716 /* Flush the prefetch buffer. */
4717 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4718#endif
4719
4720 /*
4721 * Clear RF and finish the instruction (maybe raise #DB).
4722 */
4723 return iemRegFinishClearingRF(pVCpu);
4724}
4725
4726/** @} */
4727
4728
4729/** @name FPU access and helpers.
4730 *
4731 * @{
4732 */
4733
4734/**
4735 * Updates the x87.DS and FPUDP registers.
4736 *
4737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4738 * @param pFpuCtx The FPU context.
4739 * @param iEffSeg The effective segment register.
4740 * @param GCPtrEff The effective address relative to @a iEffSeg.
4741 */
4742DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4743{
4744 RTSEL sel;
4745 switch (iEffSeg)
4746 {
4747 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4748 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4749 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4750 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4751 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4752 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4753 default:
4754 AssertMsgFailed(("%d\n", iEffSeg));
4755 sel = pVCpu->cpum.GstCtx.ds.Sel;
4756 }
4757 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4758 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4759 {
4760 pFpuCtx->DS = 0;
4761 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4762 }
4763 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4764 {
4765 pFpuCtx->DS = sel;
4766 pFpuCtx->FPUDP = GCPtrEff;
4767 }
4768 else
4769 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4770}
4771
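/*
 * A minimal sketch of the real/V86-mode FPUDP calculation used above, for
 * illustration only (not part of the build): in real and V8086 mode the data
 * pointer saved by the x87 state is the linear address, i.e. the segment
 * value shifted left by four plus the effective offset.  The helper name is
 * hypothetical.
 *
 *   @code
 *   #include <stdint.h>
 *
 *   static uint32_t ExampleRealModeFpuDp(uint16_t uSel, uint32_t offEff)
 *   {
 *       return offEff + ((uint32_t)uSel << 4);   // e.g. 1234:0010 -> 0x12350
 *   }
 *   @endcode
 */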
4772
4773/**
4774 * Rotates the stack registers in the push direction.
4775 *
4776 * @param pFpuCtx The FPU context.
4777 * @remarks This is a complete waste of time, but fxsave stores the registers in
4778 * stack order.
4779 */
4780DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4781{
4782 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4783 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4784 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4785 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4786 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4787 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4788 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4790 pFpuCtx->aRegs[0].r80 = r80Tmp;
4791}
4792
4793
4794/**
4795 * Rotates the stack registers in the pop direction.
4796 *
4797 * @param pFpuCtx The FPU context.
4798 * @remarks This is a complete waste of time, but fxsave stores the registers in
4799 * stack order.
4800 */
4801DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4802{
4803 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4804 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4805 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4806 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4807 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4808 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4809 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4810 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4811 pFpuCtx->aRegs[7].r80 = r80Tmp;
4812}
4813
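/*
 * A minimal sketch of the FSW.TOP arithmetic used by the push/pop helpers in
 * this group, for illustration only (not part of the build).  TOP occupies
 * FSW bits 11..13; a push decrements it modulo 8 (hence the "+ 7" seen
 * below) and a pop increments it modulo 8.  The EX_* constants mirror
 * X86_FSW_TOP_SHIFT / X86_FSW_TOP_SMASK / X86_FSW_TOP_MASK.
 *
 *   @code
 *   #include <stdint.h>
 *
 *   #define EX_FSW_TOP_SHIFT  11
 *   #define EX_FSW_TOP_SMASK  UINT16_C(0x7)
 *   #define EX_FSW_TOP_MASK   (EX_FSW_TOP_SMASK << EX_FSW_TOP_SHIFT)
 *
 *   static uint16_t ExampleFswTopAfterPush(uint16_t fFsw)
 *   {
 *       uint16_t const iNewTop = ((fFsw >> EX_FSW_TOP_SHIFT) + 7) & EX_FSW_TOP_SMASK;
 *       return (uint16_t)((fFsw & ~EX_FSW_TOP_MASK) | (iNewTop << EX_FSW_TOP_SHIFT));
 *   }
 *
 *   static uint16_t ExampleFswTopAfterPop(uint16_t fFsw)
 *   {
 *       uint16_t const iNewTop = ((fFsw >> EX_FSW_TOP_SHIFT) + 1) & EX_FSW_TOP_SMASK;
 *       return (uint16_t)((fFsw & ~EX_FSW_TOP_MASK) | (iNewTop << EX_FSW_TOP_SHIFT));
 *   }
 *   @endcode
 */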
4814
4815/**
4816 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4817 * exception prevents it.
4818 *
4819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4820 * @param pResult The FPU operation result to push.
4821 * @param pFpuCtx The FPU context.
4822 */
4823static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4824{
4825 /* Update FSW and bail if there are pending exceptions afterwards. */
4826 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4827 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4828 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4829 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4830 {
4831 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4832 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4834 pFpuCtx->FSW = fFsw;
4835 return;
4836 }
4837
4838 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4839 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4840 {
4841 /* All is fine, push the actual value. */
4842 pFpuCtx->FTW |= RT_BIT(iNewTop);
4843 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4844 }
4845 else if (pFpuCtx->FCW & X86_FCW_IM)
4846 {
4847 /* Masked stack overflow, push QNaN. */
4848 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4849 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4850 }
4851 else
4852 {
4853 /* Raise stack overflow, don't push anything. */
4854 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4855 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4856 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4858 return;
4859 }
4860
4861 fFsw &= ~X86_FSW_TOP_MASK;
4862 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4863 pFpuCtx->FSW = fFsw;
4864
4865 iemFpuRotateStackPush(pFpuCtx);
4866 RT_NOREF(pVCpu);
4867}
4868
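/*
 * A minimal decision-table sketch of the push-time stack check above, for
 * illustration only (not part of the build): if the register that would
 * become ST(0) is marked empty in FTW the value is pushed normally;
 * otherwise a masked invalid-operation (FCW.IM=1) pushes a QNaN, and an
 * unmasked one leaves TOP and the register stack untouched and only flags
 * the error.  The enum and helper name are hypothetical.
 *
 *   @code
 *   #include <stdint.h>
 *
 *   typedef enum { EX_PUSH_VALUE, EX_PUSH_QNAN, EX_RAISE_ONLY } EXPUSHACTION;
 *
 *   static EXPUSHACTION ExampleFpuPushAction(uint8_t fFtw, uint8_t iNewTop, uint16_t fFcw)
 *   {
 *       if (!(fFtw & (1u << iNewTop)))      // target register empty -> normal push
 *           return EX_PUSH_VALUE;
 *       if (fFcw & UINT16_C(0x0001))        // FCW.IM set -> masked stack overflow, push QNaN
 *           return EX_PUSH_QNAN;
 *       return EX_RAISE_ONLY;               // unmasked -> set IE/SF/C1/ES/B, push nothing
 *   }
 *   @endcode
 */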
4869
4870/**
4871 * Stores a result in a FPU register and updates the FSW and FTW.
4872 *
4873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4874 * @param pFpuCtx The FPU context.
4875 * @param pResult The result to store.
4876 * @param iStReg Which FPU register to store it in.
4877 */
4878static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4879{
4880 Assert(iStReg < 8);
4881 uint16_t fNewFsw = pFpuCtx->FSW;
4882 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4883 fNewFsw &= ~X86_FSW_C_MASK;
4884 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4885 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4886 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4888 pFpuCtx->FSW = fNewFsw;
4889 pFpuCtx->FTW |= RT_BIT(iReg);
4890 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4891 RT_NOREF(pVCpu);
4892}
4893
4894
4895/**
4896 * Only updates the FPU status word (FSW) with the result of the current
4897 * instruction.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param pFpuCtx The FPU context.
4901 * @param u16FSW The FSW output of the current instruction.
4902 */
4903static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4904{
4905 uint16_t fNewFsw = pFpuCtx->FSW;
4906 fNewFsw &= ~X86_FSW_C_MASK;
4907 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4908 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4909 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4910 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4911 pFpuCtx->FSW = fNewFsw;
4912 RT_NOREF(pVCpu);
4913}
4914
4915
4916/**
4917 * Pops one item off the FPU stack if no pending exception prevents it.
4918 *
4919 * @param pFpuCtx The FPU context.
4920 */
4921static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4922{
4923 /* Check pending exceptions. */
4924 uint16_t uFSW = pFpuCtx->FSW;
4925 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4926 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4927 return;
4928
4929 /* TOP--. */
4930 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4931 uFSW &= ~X86_FSW_TOP_MASK;
4932 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4933 pFpuCtx->FSW = uFSW;
4934
4935 /* Mark the previous ST0 as empty. */
4936 iOldTop >>= X86_FSW_TOP_SHIFT;
4937 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4938
4939 /* Rotate the registers. */
4940 iemFpuRotateStackPop(pFpuCtx);
4941}
4942
4943
4944/**
4945 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The FPU operation result to push.
4949 * @param uFpuOpcode The FPU opcode value.
4950 */
4951void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4955 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4956}
4957
4958
4959/**
4960 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4961 * and sets FPUDP and FPUDS.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to push.
4965 * @param iEffSeg The effective segment register.
4966 * @param GCPtrEff The effective address relative to @a iEffSeg.
4967 * @param uFpuOpcode The FPU opcode value.
4968 */
4969void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4970 uint16_t uFpuOpcode) RT_NOEXCEPT
4971{
4972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4973 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4974 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4975 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4976}
4977
4978
4979/**
4980 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4981 * unless a pending exception prevents it.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param pResult The FPU operation result to store and push.
4985 * @param uFpuOpcode The FPU opcode value.
4986 */
4987void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4988{
4989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4991
4992 /* Update FSW and bail if there are pending exceptions afterwards. */
4993 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4994 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4995 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4996 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4997 {
4998 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4999 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5001 pFpuCtx->FSW = fFsw;
5002 return;
5003 }
5004
5005 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5006 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5007 {
5008 /* All is fine, push the actual value. */
5009 pFpuCtx->FTW |= RT_BIT(iNewTop);
5010 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5011 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5012 }
5013 else if (pFpuCtx->FCW & X86_FCW_IM)
5014 {
5015 /* Masked stack overflow, push QNaN. */
5016 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5019 }
5020 else
5021 {
5022 /* Raise stack overflow, don't push anything. */
5023 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5024 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5025 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5027 return;
5028 }
5029
5030 fFsw &= ~X86_FSW_TOP_MASK;
5031 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5032 pFpuCtx->FSW = fFsw;
5033
5034 iemFpuRotateStackPush(pFpuCtx);
5035}
5036
5037
5038/**
5039 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5040 * FOP.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The result to store.
5044 * @param iStReg Which FPU register to store it in.
5045 * @param uFpuOpcode The FPU opcode value.
5046 */
5047void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5051 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5052}
5053
5054
5055/**
5056 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5057 * FOP, and then pops the stack.
5058 *
5059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5060 * @param pResult The result to store.
5061 * @param iStReg Which FPU register to store it in.
5062 * @param uFpuOpcode The FPU opcode value.
5063 */
5064void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5065{
5066 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5067 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5068 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5069 iemFpuMaybePopOne(pFpuCtx);
5070}
5071
5072
5073/**
5074 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5075 * FPUDP, and FPUDS.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param pResult The result to store.
5079 * @param iStReg Which FPU register to store it in.
5080 * @param iEffSeg The effective memory operand selector register.
5081 * @param GCPtrEff The effective memory operand offset.
5082 * @param uFpuOpcode The FPU opcode value.
5083 */
5084void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5085 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091}
5092
5093
5094/**
5095 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5096 * FPUDP, and FPUDS, and then pops the stack.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param pResult The result to store.
5100 * @param iStReg Which FPU register to store it in.
5101 * @param iEffSeg The effective memory operand selector register.
5102 * @param GCPtrEff The effective memory operand offset.
5103 * @param uFpuOpcode The FPU opcode value.
5104 */
5105void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5106 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5110 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5111 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5112 iemFpuMaybePopOne(pFpuCtx);
5113}
5114
5115
5116/**
5117 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5118 *
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126}
5127
5128
5129/**
5130 * Updates the FSW, FOP, FPUIP, and FPUCS.
5131 *
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param u16FSW The FSW from the current instruction.
5134 * @param uFpuOpcode The FPU opcode value.
5135 */
5136void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5137{
5138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5139 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5140 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5141}
5142
5143
5144/**
5145 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param u16FSW The FSW from the current instruction.
5149 * @param uFpuOpcode The FPU opcode value.
5150 */
5151void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5152{
5153 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5156 iemFpuMaybePopOne(pFpuCtx);
5157}
5158
5159
5160/**
5161 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param u16FSW The FSW from the current instruction.
5165 * @param iEffSeg The effective memory operand selector register.
5166 * @param GCPtrEff The effective memory operand offset.
5167 * @param uFpuOpcode The FPU opcode value.
5168 */
5169void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5170{
5171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5172 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5173 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5174 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5175}
5176
5177
5178/**
5179 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5180 *
5181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5182 * @param u16FSW The FSW from the current instruction.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190 iemFpuMaybePopOne(pFpuCtx);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195/**
5196 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5197 *
5198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5199 * @param u16FSW The FSW from the current instruction.
5200 * @param iEffSeg The effective memory operand selector register.
5201 * @param GCPtrEff The effective memory operand offset.
5202 * @param uFpuOpcode The FPU opcode value.
5203 */
5204void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5205 uint16_t uFpuOpcode) RT_NOEXCEPT
5206{
5207 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5208 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5209 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5210 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5211 iemFpuMaybePopOne(pFpuCtx);
5212}
5213
5214
5215/**
5216 * Worker routine for raising an FPU stack underflow exception.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 * @param pFpuCtx The FPU context.
5220 * @param iStReg The stack register being accessed.
5221 */
5222static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5223{
5224 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5225 if (pFpuCtx->FCW & X86_FCW_IM)
5226 {
5227 /* Masked underflow. */
5228 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5229 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5231 if (iStReg != UINT8_MAX)
5232 {
5233 pFpuCtx->FTW |= RT_BIT(iReg);
5234 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5235 }
5236 }
5237 else
5238 {
5239 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5240 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5241 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5242 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5243 }
5244 RT_NOREF(pVCpu);
5245}
5246
5247
5248/**
5249 * Raises a FPU stack underflow exception.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param iStReg The destination register that should be loaded
5253 * with QNaN if \#IS is not masked. Specify
5254 * UINT8_MAX if none (like for fcom).
5255 * @param uFpuOpcode The FPU opcode value.
5256 */
5257void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5258{
5259 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5260 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5261 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5262}
5263
5264
5265void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5266{
5267 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5268 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5269 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5270 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5271}
5272
5273
5274void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5278 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5284 uint16_t uFpuOpcode) RT_NOEXCEPT
5285{
5286 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5287 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5288 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5289 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5290 iemFpuMaybePopOne(pFpuCtx);
5291}
5292
5293
5294void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5295{
5296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5297 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5298 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5299 iemFpuMaybePopOne(pFpuCtx);
5300 iemFpuMaybePopOne(pFpuCtx);
5301}
5302
5303
5304void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5305{
5306 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5307 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5308
5309 if (pFpuCtx->FCW & X86_FCW_IM)
5310 {
5311 /* Masked underflow - Push QNaN. */
5312 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5313 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5315 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5316 pFpuCtx->FTW |= RT_BIT(iNewTop);
5317 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5318 iemFpuRotateStackPush(pFpuCtx);
5319 }
5320 else
5321 {
5322 /* Exception pending - don't change TOP or the register stack. */
5323 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5324 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5325 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5327 }
5328}
5329
5330
5331void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5332{
5333 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335
5336 if (pFpuCtx->FCW & X86_FCW_IM)
5337 {
5338 /* Masked underflow - Push QNaN. */
5339 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5340 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5341 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5342 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5343 pFpuCtx->FTW |= RT_BIT(iNewTop);
5344 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5345 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5346 iemFpuRotateStackPush(pFpuCtx);
5347 }
5348 else
5349 {
5350 /* Exception pending - don't change TOP or the register stack. */
5351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5352 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5353 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5354 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5355 }
5356}
5357
5358
5359/**
5360 * Worker routine for raising an FPU stack overflow exception on a push.
5361 *
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param pFpuCtx The FPU context.
5364 */
5365static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5366{
5367 if (pFpuCtx->FCW & X86_FCW_IM)
5368 {
5369 /* Masked overflow. */
5370 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5371 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5372 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5373 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5374 pFpuCtx->FTW |= RT_BIT(iNewTop);
5375 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5376 iemFpuRotateStackPush(pFpuCtx);
5377 }
5378 else
5379 {
5380 /* Exception pending - don't change TOP or the register stack. */
5381 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5382 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5383 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5385 }
5386 RT_NOREF(pVCpu);
5387}
5388
5389
5390/**
5391 * Raises a FPU stack overflow exception on a push.
5392 *
5393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5394 * @param uFpuOpcode The FPU opcode value.
5395 */
5396void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5397{
5398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5399 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5400 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5401}
5402
5403
5404/**
5405 * Raises a FPU stack overflow exception on a push with a memory operand.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param iEffSeg The effective memory operand selector register.
5409 * @param GCPtrEff The effective memory operand offset.
5410 * @param uFpuOpcode The FPU opcode value.
5411 */
5412void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5413{
5414 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5415 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5416 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5417 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5418}
5419
5420/** @} */
5421
5422
5423/** @name SSE+AVX SIMD access and helpers.
5424 *
5425 * @{
5426 */
5427/**
5428 * Stores a result in a SIMD XMM register, updates the MXCSR.
5429 *
5430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5431 * @param pResult The result to store.
5432 * @param iXmmReg Which SIMD XMM register to store the result in.
5433 */
5434void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5438
5439 /* The result is only updated if there is no unmasked exception pending. */
5440 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5441 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5442 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5443}
5444
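/*
 * A minimal sketch of the MXCSR check performed above, for illustration only
 * (not part of the build): the six exception flags live in MXCSR bits 0..5
 * and the corresponding mask bits in bits 7..12, so shifting the mask field
 * down by seven lines the two fields up, and an exception is unmasked
 * exactly when its flag bit is set while its mask bit is clear.  The helper
 * name is hypothetical.
 *
 *   @code
 *   #include <stdint.h>
 *   #include <stdbool.h>
 *
 *   static bool ExampleHasUnmaskedSseXcpt(uint32_t fMxcsr)
 *   {
 *       uint32_t const fFlags = fMxcsr & UINT32_C(0x003f);          // IE,DE,ZE,OE,UE,PE
 *       uint32_t const fMasks = (fMxcsr >> 7) & UINT32_C(0x003f);   // IM,DM,ZM,OM,UM,PM
 *       return (fFlags & ~fMasks) != 0;
 *   }
 *   @endcode
 */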
5445
5446/**
5447 * Updates the MXCSR.
5448 *
5449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5450 * @param fMxcsr The new MXCSR value.
5451 */
5452void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5456}
5457/** @} */
5458
5459
5460/** @name Memory access.
5461 *
5462 * @{
5463 */
5464
5465
5466/**
5467 * Updates the IEMCPU::cbWritten counter if applicable.
5468 *
5469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5470 * @param fAccess The access being accounted for.
5471 * @param cbMem The access size.
5472 */
5473DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5474{
5475 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5476 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5477 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5478}
5479
5480
5481/**
5482 * Applies the segment limit, base and attributes.
5483 *
5484 * This may raise a \#GP or \#SS.
5485 *
5486 * @returns VBox strict status code.
5487 *
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param fAccess The kind of access which is being performed.
5490 * @param iSegReg The index of the segment register to apply.
5491 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5492 * TSS, ++).
5493 * @param cbMem The access size.
5494 * @param pGCPtrMem Pointer to the guest memory address to apply
5495 * segmentation to. Input and output parameter.
5496 */
5497VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5498{
5499 if (iSegReg == UINT8_MAX)
5500 return VINF_SUCCESS;
5501
5502 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5503 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5504 switch (IEM_GET_CPU_MODE(pVCpu))
5505 {
5506 case IEMMODE_16BIT:
5507 case IEMMODE_32BIT:
5508 {
5509 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5510 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5511
5512 if ( pSel->Attr.n.u1Present
5513 && !pSel->Attr.n.u1Unusable)
5514 {
5515 Assert(pSel->Attr.n.u1DescType);
5516 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5517 {
5518 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5519 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5520 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5521
5522 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5523 {
5524 /** @todo CPL check. */
5525 }
5526
5527 /*
5528 * There are two kinds of data selectors, normal and expand down.
5529 */
5530 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5531 {
5532 if ( GCPtrFirst32 > pSel->u32Limit
5533 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5534 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5535 }
5536 else
5537 {
5538 /*
5539 * The upper boundary is defined by the B bit, not the G bit!
5540 */
5541 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5542 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5543 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5544 }
5545 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5546 }
5547 else
5548 {
5549 /*
5550 * Code selectors can usually be used to read through; writing is
5551 * only permitted in real and V8086 mode.
5552 */
5553 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5554 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5555 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5556 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5557 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5558
5559 if ( GCPtrFirst32 > pSel->u32Limit
5560 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5561 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5562
5563 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5564 {
5565 /** @todo CPL check. */
5566 }
5567
5568 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5569 }
5570 }
5571 else
5572 return iemRaiseGeneralProtectionFault0(pVCpu);
5573 return VINF_SUCCESS;
5574 }
5575
5576 case IEMMODE_64BIT:
5577 {
5578 RTGCPTR GCPtrMem = *pGCPtrMem;
5579 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5580 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5581
5582 Assert(cbMem >= 1);
5583 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5584 return VINF_SUCCESS;
5585 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5586 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5587 return iemRaiseGeneralProtectionFault0(pVCpu);
5588 }
5589
5590 default:
5591 AssertFailedReturn(VERR_IEM_IPE_7);
5592 }
5593}
5594
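/*
 * A minimal sketch of the 16/32-bit data-segment limit check applied above,
 * for illustration only (not part of the build).  For a normal segment the
 * whole access must lie at or below the limit; for an expand-down segment
 * the valid range is (limit, 0xffff] or (limit, 0xffffffff] depending on the
 * D/B bit.  The helper name and parameters are hypothetical.
 *
 *   @code
 *   #include <stdint.h>
 *   #include <stdbool.h>
 *
 *   static bool ExampleDataSegLimitOk(uint32_t offFirst, uint32_t cbMem, uint32_t cbLimit,
 *                                     bool fExpandDown, bool fDefBig)
 *   {
 *       uint32_t const offLast = offFirst + cbMem - 1;
 *       if (!fExpandDown)
 *           return offFirst <= cbLimit && offLast <= cbLimit;        // normal segment
 *       uint32_t const offMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
 *       return offFirst > cbLimit && offLast <= offMax;              // expand-down segment
 *   }
 *   @endcode
 */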
5595
5596/**
5597 * Translates a virtual address to a physical address and checks if we
5598 * can access the page as specified.
5599 *
5600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5601 * @param GCPtrMem The virtual address.
5602 * @param cbAccess The access size, for raising \#PF correctly for
5603 * FXSAVE and such.
5604 * @param fAccess The intended access.
5605 * @param pGCPhysMem Where to return the physical address.
5606 */
5607VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5608 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5609{
5610 /** @todo Need a different PGM interface here. We're currently using
5611 * generic / REM interfaces. This won't cut it for R0. */
5612 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5613 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5614 * here. */
5615 PGMPTWALK Walk;
5616 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5617 if (RT_FAILURE(rc))
5618 {
5619 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5620 /** @todo Check unassigned memory in unpaged mode. */
5621 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5623 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5624 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5625#endif
5626 *pGCPhysMem = NIL_RTGCPHYS;
5627 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5628 }
5629
5630 /* If the page is writable and does not have the no-exec bit set, all
5631 access is allowed. Otherwise we'll have to check more carefully... */
5632 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5633 {
5634 /* Write to read only memory? */
5635 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5636 && !(Walk.fEffective & X86_PTE_RW)
5637 && ( ( IEM_GET_CPL(pVCpu) == 3
5638 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5639 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5640 {
5641 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5642 *pGCPhysMem = NIL_RTGCPHYS;
5643#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5644 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5645 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5646#endif
5647 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5648 }
5649
5650 /* Kernel memory accessed by userland? */
5651 if ( !(Walk.fEffective & X86_PTE_US)
5652 && IEM_GET_CPL(pVCpu) == 3
5653 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5654 {
5655 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5656 *pGCPhysMem = NIL_RTGCPHYS;
5657#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5658 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5659 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5660#endif
5661 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5662 }
5663
5664 /* Executing non-executable memory? */
5665 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5666 && (Walk.fEffective & X86_PTE_PAE_NX)
5667 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5668 {
5669 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5670 *pGCPhysMem = NIL_RTGCPHYS;
5671#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5674#endif
5675 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5676 VERR_ACCESS_DENIED);
5677 }
5678 }
5679
5680 /*
5681 * Set the dirty / access flags.
5682 * ASSUMES this is set when the address is translated rather than on commit...
5683 */
5684 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5685 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5686 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5687 {
5688 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5689 AssertRC(rc2);
5690 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5691 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5692 }
5693
5694 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5695 *pGCPhysMem = GCPhys;
5696 return VINF_SUCCESS;
5697}
5698
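/*
 * A minimal sketch of the paging permission rules applied above, for
 * illustration only (not part of the build).  The inputs mirror the
 * effective R/W, U/S and NX bits from the page walk together with CPL,
 * CR0.WP and EFER.NXE; the booleans here are hypothetical stand-ins for the
 * IEM_ACCESS_* masks (fUserAccess corresponds to CPL 3 without
 * IEM_ACCESS_WHAT_SYS).
 *
 *   @code
 *   #include <stdbool.h>
 *
 *   static bool ExamplePageAccessAllowed(bool fPteWritable, bool fPteUser, bool fPteNoExec,
 *                                        bool fWrite, bool fExec, bool fUserAccess,
 *                                        bool fCr0Wp, bool fEferNxe)
 *   {
 *       if (fWrite && !fPteWritable && (fUserAccess || fCr0Wp))
 *           return false;    // write to a read-only page
 *       if (fUserAccess && !fPteUser)
 *           return false;    // user-mode access to a supervisor page
 *       if (fExec && fPteNoExec && fEferNxe)
 *           return false;    // instruction fetch from a no-execute page
 *       return true;
 *   }
 *   @endcode
 */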
5699
5700/**
5701 * Looks up a memory mapping entry.
5702 *
5703 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5705 * @param pvMem The memory address.
5706 * @param fAccess The access type and origin to match.
5707 */
5708DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5709{
5710 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5711 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5712 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5713 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5714 return 0;
5715 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5716 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5717 return 1;
5718 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5719 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5720 return 2;
5721 return VERR_NOT_FOUND;
5722}
5723
5724
5725/**
5726 * Finds a free memmap entry when using iNextMapping doesn't work.
5727 *
5728 * @returns Memory mapping index, 1024 on failure.
5729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5730 */
5731static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5732{
5733 /*
5734 * The easy case.
5735 */
5736 if (pVCpu->iem.s.cActiveMappings == 0)
5737 {
5738 pVCpu->iem.s.iNextMapping = 1;
5739 return 0;
5740 }
5741
5742 /* There should be enough mappings for all instructions. */
5743 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5744
5745 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5746 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5747 return i;
5748
5749 AssertFailedReturn(1024);
5750}
5751
5752
5753/**
5754 * Commits a bounce buffer that needs writing back and unmaps it.
5755 *
5756 * @returns Strict VBox status code.
5757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5758 * @param iMemMap The index of the buffer to commit.
5759 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5760 * Always false in ring-3, obviously.
5761 */
5762static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5763{
5764 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5765 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5766#ifdef IN_RING3
5767 Assert(!fPostponeFail);
5768 RT_NOREF_PV(fPostponeFail);
5769#endif
5770
5771 /*
5772 * Do the writing.
5773 */
5774 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5775 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5776 {
5777 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5778 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5779 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5780 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5781 {
5782 /*
5783 * Carefully and efficiently dealing with access handler return
5784 * codes makes this a little bloated.
5785 */
5786 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5788 pbBuf,
5789 cbFirst,
5790 PGMACCESSORIGIN_IEM);
5791 if (rcStrict == VINF_SUCCESS)
5792 {
5793 if (cbSecond)
5794 {
5795 rcStrict = PGMPhysWrite(pVM,
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5797 pbBuf + cbFirst,
5798 cbSecond,
5799 PGMACCESSORIGIN_IEM);
5800 if (rcStrict == VINF_SUCCESS)
5801 { /* nothing */ }
5802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5803 {
5804 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5807 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5808 }
5809#ifndef IN_RING3
5810 else if (fPostponeFail)
5811 {
5812 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5815 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5816 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5817 return iemSetPassUpStatus(pVCpu, rcStrict);
5818 }
5819#endif
5820 else
5821 {
5822 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5825 return rcStrict;
5826 }
5827 }
5828 }
5829 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5830 {
5831 if (!cbSecond)
5832 {
5833 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5835 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5836 }
5837 else
5838 {
5839 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5841 pbBuf + cbFirst,
5842 cbSecond,
5843 PGMACCESSORIGIN_IEM);
5844 if (rcStrict2 == VINF_SUCCESS)
5845 {
5846 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5850 }
5851 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5852 {
5853 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5856 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5857 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5858 }
5859#ifndef IN_RING3
5860 else if (fPostponeFail)
5861 {
5862 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5866 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5867 return iemSetPassUpStatus(pVCpu, rcStrict);
5868 }
5869#endif
5870 else
5871 {
5872 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5875 return rcStrict2;
5876 }
5877 }
5878 }
5879#ifndef IN_RING3
5880 else if (fPostponeFail)
5881 {
5882 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5885 if (!cbSecond)
5886 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5887 else
5888 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5889 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5890 return iemSetPassUpStatus(pVCpu, rcStrict);
5891 }
5892#endif
5893 else
5894 {
5895 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5898 return rcStrict;
5899 }
5900 }
5901 else
5902 {
5903 /*
5904 * No access handlers, much simpler.
5905 */
5906 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5907 if (RT_SUCCESS(rc))
5908 {
5909 if (cbSecond)
5910 {
5911 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5912 if (RT_SUCCESS(rc))
5913 { /* likely */ }
5914 else
5915 {
5916 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5917 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5919 return rc;
5920 }
5921 }
5922 }
5923 else
5924 {
5925 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5928 return rc;
5929 }
5930 }
5931 }
5932
5933#if defined(IEM_LOG_MEMORY_WRITES)
5934 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5935 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5936 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5937 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5938 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5939 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5940
5941 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5942 g_cbIemWrote = cbWrote;
5943 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5944#endif
5945
5946 /*
5947 * Free the mapping entry.
5948 */
5949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5950 Assert(pVCpu->iem.s.cActiveMappings != 0);
5951 pVCpu->iem.s.cActiveMappings--;
5952 return VINF_SUCCESS;
5953}
5954
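/*
 * A minimal sketch of the two-chunk commit performed above for bounce
 * buffered writes that straddle a page boundary, for illustration only (not
 * part of the build).  The bounce buffer holds cbFirst + cbSecond contiguous
 * bytes; the first chunk goes to the physical address of the first page and
 * the remainder to the (not necessarily adjacent) second page.  The write
 * callback is a hypothetical stand-in for PGMPhysWrite, with 0 meaning
 * success in this sketch.
 *
 *   @code
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   typedef int (*EXPFNPHYSWRITE)(uint64_t GCPhys, const void *pv, size_t cb);
 *
 *   static int ExampleCommitSplitWrite(EXPFNPHYSWRITE pfnWrite, const uint8_t *pbBuf,
 *                                      uint64_t GCPhysFirst, size_t cbFirst,
 *                                      uint64_t GCPhysSecond, size_t cbSecond)
 *   {
 *       int rc = pfnWrite(GCPhysFirst, pbBuf, cbFirst);          // first page
 *       if (rc == 0 && cbSecond)
 *           rc = pfnWrite(GCPhysSecond, pbBuf + cbFirst, cbSecond); // second page
 *       return rc;
 *   }
 *   @endcode
 */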
5955
5956/**
5957 * iemMemMap worker that deals with a request crossing pages.
5958 */
5959static VBOXSTRICTRC
5960iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5961{
5962 Assert(cbMem <= GUEST_PAGE_SIZE);
5963
5964 /*
5965 * Do the address translations.
5966 */
5967 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5968 RTGCPHYS GCPhysFirst;
5969 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5970 if (rcStrict != VINF_SUCCESS)
5971 return rcStrict;
5972 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5973
5974 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5975 RTGCPHYS GCPhysSecond;
5976 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5977 cbSecondPage, fAccess, &GCPhysSecond);
5978 if (rcStrict != VINF_SUCCESS)
5979 return rcStrict;
5980 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5981 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5982
5983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5984
5985 /*
5986 * Read in the current memory content if it's a read, execute or partial
5987 * write access.
5988 */
5989 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5990
5991 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5992 {
5993 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5994 {
5995 /*
5996 * Must carefully deal with access handler status codes here,
5997 * Must carefully deal with access handler status codes here,
5998 * which makes the code a bit bloated.
5999 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6000 if (rcStrict == VINF_SUCCESS)
6001 {
6002 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6003 if (rcStrict == VINF_SUCCESS)
6004 { /*likely */ }
6005 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6006 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6007 else
6008 {
6009 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6010 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6011 return rcStrict;
6012 }
6013 }
6014 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6015 {
6016 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6017 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6018 {
6019 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6020 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6021 }
6022 else
6023 {
6024 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6025 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6026 return rcStrict2;
6027 }
6028 }
6029 else
6030 {
6031 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6032 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6033 return rcStrict;
6034 }
6035 }
6036 else
6037 {
6038 /*
6039 * No informational status codes here, so this is much more straightforward.
6040 */
6041 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6042 if (RT_SUCCESS(rc))
6043 {
6044 Assert(rc == VINF_SUCCESS);
6045 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6046 if (RT_SUCCESS(rc))
6047 Assert(rc == VINF_SUCCESS);
6048 else
6049 {
6050 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6051 return rc;
6052 }
6053 }
6054 else
6055 {
6056 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6057 return rc;
6058 }
6059 }
6060 }
6061#ifdef VBOX_STRICT
6062 else
6063 memset(pbBuf, 0xcc, cbMem);
6064 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6065 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6066#endif
6067 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6068
6069 /*
6070 * Commit the bounce buffer entry.
6071 */
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6074 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6075 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6076 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6077 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6078 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6079 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6080 pVCpu->iem.s.cActiveMappings++;
6081
6082 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6083 *ppvMem = pbBuf;
6084 return VINF_SUCCESS;
6085}
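
/*
 * A minimal standalone sketch (illustration only, not built) of the page-split
 * arithmetic the worker above performs.  EXAMPLE_PAGE_SIZE and
 * EXAMPLE_PAGE_OFFSET_MASK stand in for GUEST_PAGE_SIZE / GUEST_PAGE_OFFSET_MASK,
 * assuming the usual 4 KiB guest page.
 */
#if 0
# include <stdint.h>
# include <stddef.h>
# include <assert.h>

# define EXAMPLE_PAGE_SIZE          UINT32_C(0x1000)
# define EXAMPLE_PAGE_OFFSET_MASK   (EXAMPLE_PAGE_SIZE - UINT32_C(1))

/* Splits a page-crossing access into its two per-page chunks. */
static void exampleSplitCrossPageAccess(uint64_t GCPtrFirst, size_t cbMem,
                                        uint32_t *pcbFirstPage, uint32_t *pcbSecondPage, uint64_t *pGCPtrSecond)
{
    assert(cbMem <= EXAMPLE_PAGE_SIZE);                                          /* same restriction as above */
    assert((GCPtrFirst & EXAMPLE_PAGE_OFFSET_MASK) + cbMem > EXAMPLE_PAGE_SIZE); /* must actually cross */

    /* Bytes that still fit on the first page; the rest spills onto the next one. */
    *pcbFirstPage  = EXAMPLE_PAGE_SIZE - (uint32_t)(GCPtrFirst & EXAMPLE_PAGE_OFFSET_MASK);
    *pcbSecondPage = (uint32_t)cbMem - *pcbFirstPage;

    /* The second chunk starts at the beginning of the following page. */
    *pGCPtrSecond  = (GCPtrFirst + (cbMem - 1)) & ~(uint64_t)EXAMPLE_PAGE_OFFSET_MASK;
}
#endif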
6086
6087
6088/**
6089 * iemMemMap worker that deals with iemMemPageMap failures.
6090 */
6091static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6092 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6093{
6094 /*
6095 * Filter out the conditions we can't handle and the ones which shouldn't happen.
6096 */
6097 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6098 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6099 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6100 {
6101 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6102 return rcMap;
6103 }
6104 pVCpu->iem.s.cPotentialExits++;
6105
6106 /*
6107 * Read in the current memory content if it's a read, execute or partial
6108 * write access.
6109 */
6110 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6111 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6112 {
6113 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6114 memset(pbBuf, 0xff, cbMem);
6115 else
6116 {
6117 int rc;
6118 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6119 {
6120 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6121 if (rcStrict == VINF_SUCCESS)
6122 { /* nothing */ }
6123 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6124 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6125 else
6126 {
6127 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6128 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6129 return rcStrict;
6130 }
6131 }
6132 else
6133 {
6134 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6135 if (RT_SUCCESS(rc))
6136 { /* likely */ }
6137 else
6138 {
6139 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6140 GCPhysFirst, rc));
6141 return rc;
6142 }
6143 }
6144 }
6145 }
6146#ifdef VBOX_STRICT
6147 else
6148 memset(pbBuf, 0xcc, cbMem);
6149#endif
6150#ifdef VBOX_STRICT
6151 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6152 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6153#endif
6154
6155 /*
6156 * Commit the bounce buffer entry.
6157 */
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6163 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6164 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6165 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6166 pVCpu->iem.s.cActiveMappings++;
6167
6168 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6169 *ppvMem = pbBuf;
6170 return VINF_SUCCESS;
6171}
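
/*
 * Illustration only (not built): why the bounce buffer is pre-filled with the
 * current guest content for read, execute and partial-write access.  The commit
 * path writes the whole buffered range back, so any byte the instruction never
 * touches must already hold what the guest memory contains.  A generic
 * read-modify-write sketch of that idea, with no IEM types involved:
 */
#if 0
# include <stdint.h>
# include <stddef.h>
# include <string.h>
# include <assert.h>

static void exampleBufferedPartialWrite(uint8_t *pbGuestMem, size_t cbRange,
                                        size_t offWrite, const uint8_t *pbNew, size_t cbNew)
{
    uint8_t abBounce[4096];
    assert(cbRange <= sizeof(abBounce) && offWrite + cbNew <= cbRange);
    memcpy(abBounce, pbGuestMem, cbRange);          /* pre-fill: preserve the untouched bytes */
    memcpy(&abBounce[offWrite], pbNew, cbNew);      /* the instruction updates only a sub-range */
    memcpy(pbGuestMem, abBounce, cbRange);          /* commit writes the whole range back */
}
#endif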
6172
6173
6174
6175/**
6176 * Maps the specified guest memory for the given kind of access.
6177 *
6178 * This may be using bounce buffering of the memory if it's crossing a page
6179 * boundary or if there is an access handler installed for any of it. Because
6180 * of lock prefix guarantees, we're in for some extra clutter when this
6181 * happens.
6182 *
6183 * This may raise a \#GP, \#SS, \#PF or \#AC.
6184 *
6185 * @returns VBox strict status code.
6186 *
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param ppvMem Where to return the pointer to the mapped memory.
6189 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6190 * 8, 12, 16, 32 or 512. When used by string operations
6191 * it can be up to a page.
6192 * @param iSegReg The index of the segment register to use for this
6193 * access. The base and limits are checked. Use UINT8_MAX
6194 * to indicate that no segmentation is required (for IDT,
6195 * GDT and LDT accesses).
6196 * @param GCPtrMem The address of the guest memory.
6197 * @param fAccess How the memory is being accessed. The
6198 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6199 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6200 * when raising exceptions.
6201 * @param uAlignCtl Alignment control:
6202 * - Bits 15:0 is the alignment mask.
6203 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6204 * IEM_MEMMAP_F_ALIGN_SSE, and
6205 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6206 * Pass zero to skip alignment.
6207 */
6208VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6209 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6210{
6211 /*
6212 * Check the input and figure out which mapping entry to use.
6213 */
6214 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6215 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6216 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6217 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6218 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6219
6220 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6221 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6222 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6223 {
6224 iMemMap = iemMemMapFindFree(pVCpu);
6225 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6226 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6227 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6228 pVCpu->iem.s.aMemMappings[2].fAccess),
6229 VERR_IEM_IPE_9);
6230 }
6231
6232 /*
6233 * Map the memory, checking that we can actually access it. If something
6234 * slightly complicated happens, fall back on bounce buffering.
6235 */
6236 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6237 if (rcStrict == VINF_SUCCESS)
6238 { /* likely */ }
6239 else
6240 return rcStrict;
6241
6242 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6243 { /* likely */ }
6244 else
6245 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6246
6247 /*
6248 * Alignment check.
6249 */
6250 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6251 { /* likelyish */ }
6252 else
6253 {
6254 /* Misaligned access. */
6255 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6256 {
6257 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6258 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6259 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6260 {
6261 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6262
6263 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6264 return iemRaiseAlignmentCheckException(pVCpu);
6265 }
6266 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6267 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6268 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6269 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6270 * that's what FXSAVE does on a 10980xe. */
6271 && iemMemAreAlignmentChecksEnabled(pVCpu))
6272 return iemRaiseAlignmentCheckException(pVCpu);
6273 else
6274 return iemRaiseGeneralProtectionFault0(pVCpu);
6275 }
6276 }
6277
6278#ifdef IEM_WITH_DATA_TLB
6279 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6280
6281 /*
6282 * Get the TLB entry for this page.
6283 */
6284 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6285 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6286 if (pTlbe->uTag == uTag)
6287 {
6288# ifdef VBOX_WITH_STATISTICS
6289 pVCpu->iem.s.DataTlb.cTlbHits++;
6290# endif
6291 }
6292 else
6293 {
6294 pVCpu->iem.s.DataTlb.cTlbMisses++;
6295 PGMPTWALK Walk;
6296 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6297 if (RT_FAILURE(rc))
6298 {
6299 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6300# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6301 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6302 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6303# endif
6304 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6305 }
6306
6307 Assert(Walk.fSucceeded);
6308 pTlbe->uTag = uTag;
6309 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6310 pTlbe->GCPhys = Walk.GCPhys;
6311 pTlbe->pbMappingR3 = NULL;
6312 }
6313
6314 /*
6315 * Check TLB page table level access flags.
6316 */
6317 /* If the page is either supervisor only or non-writable, we need to do
6318 more careful access checks. */
6319 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6320 {
6321 /* Write to read only memory? */
6322 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6323 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6324 && ( ( IEM_GET_CPL(pVCpu) == 3
6325 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6326 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6327 {
6328 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6329# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6330 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6331 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6332# endif
6333 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6334 }
6335
6336 /* Kernel memory accessed by userland? */
6337 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6338 && IEM_GET_CPL(pVCpu) == 3
6339 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6340 {
6341 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6342# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6343 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6344 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6345# endif
6346 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6347 }
6348 }
6349
6350 /*
6351 * Set the dirty / access flags.
6352 * ASSUMES this is set when the address is translated rather than on commit...
6353 */
6354 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6355 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6356 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6357 {
6358 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6359 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6360 AssertRC(rc2);
6361 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6362 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6363 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6364 }
6365
6366 /*
6367 * Look up the physical page info if necessary.
6368 */
6369 uint8_t *pbMem = NULL;
6370 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6371# ifdef IN_RING3
6372 pbMem = pTlbe->pbMappingR3;
6373# else
6374 pbMem = NULL;
6375# endif
6376 else
6377 {
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6379 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6380 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6381 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6382 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6383 { /* likely */ }
6384 else
6385 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6386 pTlbe->pbMappingR3 = NULL;
6387 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6388 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6389 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6390 &pbMem, &pTlbe->fFlagsAndPhysRev);
6391 AssertRCReturn(rc, rc);
6392# ifdef IN_RING3
6393 pTlbe->pbMappingR3 = pbMem;
6394# endif
6395 }
6396
6397 /*
6398 * Check the physical page level access and mapping.
6399 */
6400 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6401 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6402 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6403 { /* probably likely */ }
6404 else
6405 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6406 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6407 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6408 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6409 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6410 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6411
6412 if (pbMem)
6413 {
6414 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6415 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6416 fAccess |= IEM_ACCESS_NOT_LOCKED;
6417 }
6418 else
6419 {
6420 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6421 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6422 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6423 if (rcStrict != VINF_SUCCESS)
6424 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6425 }
6426
6427 void * const pvMem = pbMem;
6428
6429 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6430 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6431 if (fAccess & IEM_ACCESS_TYPE_READ)
6432 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6433
6434#else /* !IEM_WITH_DATA_TLB */
6435
6436 RTGCPHYS GCPhysFirst;
6437 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6438 if (rcStrict != VINF_SUCCESS)
6439 return rcStrict;
6440
6441 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6442 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6443 if (fAccess & IEM_ACCESS_TYPE_READ)
6444 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6445
6446 void *pvMem;
6447 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6448 if (rcStrict != VINF_SUCCESS)
6449 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6450
6451#endif /* !IEM_WITH_DATA_TLB */
6452
6453 /*
6454 * Fill in the mapping table entry.
6455 */
6456 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6457 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6458 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6459 pVCpu->iem.s.cActiveMappings += 1;
6460
6461 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6462 *ppvMem = pvMem;
6463
6464 return VINF_SUCCESS;
6465}
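
/*
 * Illustration only (not built): how the uAlignCtl word documented above is put
 * together and tested.  The low 16 bits carry the alignment mask that feeds the
 * "(GCPtrMem & (uAlignCtl & UINT16_MAX))" check, while the high bits carry
 * behaviour flags.  EXAMPLE_ALIGN_GP / EXAMPLE_ALIGN_SSE are stand-ins for the
 * real IEM_MEMMAP_F_ALIGN_GP / IEM_MEMMAP_F_ALIGN_SSE values in the IEM headers.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_ALIGN_GP   UINT32_C(0x00010000)    /* stand-in flag in the bits 31:16 region */
# define EXAMPLE_ALIGN_SSE  UINT32_C(0x00020000)    /* stand-in flag in the bits 31:16 region */

/* Natural alignment for a qword access: mask only, no flags. */
static uint32_t const g_uAlignCtlU64 = sizeof(uint64_t) - 1;

/* 16-byte aligned SSE access preferring #GP (the pattern the U128AlignedSse fetchers below use). */
static uint32_t const g_uAlignCtlSse = 15 | EXAMPLE_ALIGN_GP | EXAMPLE_ALIGN_SSE;

/* The misalignment test applied by the mapping code. */
static bool exampleIsMisaligned(uint64_t GCPtrMem, uint32_t uAlignCtl)
{
    return (GCPtrMem & (uAlignCtl & UINT16_MAX)) != 0;
}
#endif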
6466
6467
6468/**
6469 * Commits the guest memory if bounce buffered and unmaps it.
6470 *
6471 * @returns Strict VBox status code.
6472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6473 * @param pvMem The mapping.
6474 * @param fAccess The kind of access.
6475 */
6476VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6477{
6478 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6479 AssertReturn(iMemMap >= 0, iMemMap);
6480
6481 /* If it's bounce buffered, we may need to write back the buffer. */
6482 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6483 {
6484 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6485 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6486 }
6487 /* Otherwise unlock it. */
6488 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6489 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6490
6491 /* Free the entry. */
6492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6493 Assert(pVCpu->iem.s.cActiveMappings != 0);
6494 pVCpu->iem.s.cActiveMappings--;
6495 return VINF_SUCCESS;
6496}
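
/*
 * Illustration only (not built): the intended map/modify/commit calling pattern
 * for a simple 4-byte guest store, mirroring the fetch helpers further down.
 * IEM_ACCESS_DATA_W is assumed here to be the write counterpart of the
 * IEM_ACCESS_DATA_R flag used throughout this file.
 */
#if 0
static VBOXSTRICTRC iemExampleStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_W /* assumed */, sizeof(*pu32Dst) - 1 /* natural alignment */);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W /* assumed */);
    }
    return rcStrict;
}
#endif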
6497
6498#ifdef IEM_WITH_SETJMP
6499
6500/**
6501 * Maps the specified guest memory for the given kind of access, longjmp on
6502 * error.
6503 *
6504 * This may be using bounce buffering of the memory if it's crossing a page
6505 * boundary or if there is an access handler installed for any of it. Because
6506 * of lock prefix guarantees, we're in for some extra clutter when this
6507 * happens.
6508 *
6509 * This may raise a \#GP, \#SS, \#PF or \#AC.
6510 *
6511 * @returns Pointer to the mapped memory.
6512 *
6513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6514 * @param cbMem The number of bytes to map. This is usually 1,
6515 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6516 * string operations it can be up to a page.
6517 * @param iSegReg The index of the segment register to use for
6518 * this access. The base and limits are checked.
6519 * Use UINT8_MAX to indicate that no segmentation
6520 * is required (for IDT, GDT and LDT accesses).
6521 * @param GCPtrMem The address of the guest memory.
6522 * @param fAccess How the memory is being accessed. The
6523 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6524 * how to map the memory, while the
6525 * IEM_ACCESS_WHAT_XXX bit is used when raising
6526 * exceptions.
6527 * @param uAlignCtl Alignment control:
6528 * - Bits 15:0 is the alignment mask.
6529 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6530 * IEM_MEMMAP_F_ALIGN_SSE, and
6531 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6532 * Pass zero to skip alignment.
6533 */
6534void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6535 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6536{
6537 /*
6538 * Check the input, check segment access and adjust address
6539 * with segment base.
6540 */
6541 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6542 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6543 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6544
6545 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6546 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6547 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6548
6549 /*
6550 * Alignment check.
6551 */
6552 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6553 { /* likelyish */ }
6554 else
6555 {
6556 /* Misaligned access. */
6557 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6558 {
6559 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6560 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6561 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6562 {
6563 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6564
6565 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6566 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6567 }
6568 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6569 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6570 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6571 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6572 * that's what FXSAVE does on a 10980xe. */
6573 && iemMemAreAlignmentChecksEnabled(pVCpu))
6574 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6575 else
6576 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6577 }
6578 }
6579
6580 /*
6581 * Figure out which mapping entry to use.
6582 */
6583 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6584 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6585 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6586 {
6587 iMemMap = iemMemMapFindFree(pVCpu);
6588 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6589 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6590 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6591 pVCpu->iem.s.aMemMappings[2].fAccess),
6592 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6593 }
6594
6595 /*
6596 * Crossing a page boundary?
6597 */
6598 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6599 { /* No (likely). */ }
6600 else
6601 {
6602 void *pvMem;
6603 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6604 if (rcStrict == VINF_SUCCESS)
6605 return pvMem;
6606 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6607 }
6608
6609#ifdef IEM_WITH_DATA_TLB
6610 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6611
6612 /*
6613 * Get the TLB entry for this page.
6614 */
6615 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6616 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6617 if (pTlbe->uTag == uTag)
6618 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6619 else
6620 {
6621 pVCpu->iem.s.DataTlb.cTlbMisses++;
6622 PGMPTWALK Walk;
6623 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6624 if (RT_FAILURE(rc))
6625 {
6626 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6627# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6628 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6629 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6630# endif
6631 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6632 }
6633
6634 Assert(Walk.fSucceeded);
6635 pTlbe->uTag = uTag;
6636 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6637 pTlbe->GCPhys = Walk.GCPhys;
6638 pTlbe->pbMappingR3 = NULL;
6639 }
6640
6641 /*
6642 * Check the flags and physical revision.
6643 */
6644 /** @todo make the caller pass these in with fAccess. */
6645 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6646 ? IEMTLBE_F_PT_NO_USER : 0;
6647 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6648 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6649 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6650 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6651 ? IEMTLBE_F_PT_NO_WRITE : 0)
6652 : 0;
6653 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6654 uint8_t *pbMem = NULL;
6655 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6656 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6657# ifdef IN_RING3
6658 pbMem = pTlbe->pbMappingR3;
6659# else
6660 pbMem = NULL;
6661# endif
6662 else
6663 {
6664 /*
6665 * Okay, something isn't quite right or needs refreshing.
6666 */
6667 /* Write to read only memory? */
6668 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6669 {
6670 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6671# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6674# endif
6675 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6676 }
6677
6678 /* Kernel memory accessed by userland? */
6679 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6680 {
6681 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6682# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6683 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6684 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6685# endif
6686 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6687 }
6688
6689 /* Set the dirty / access flags.
6690 ASSUMES this is set when the address is translated rather than on commit... */
6691 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6692 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6693 {
6694 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6695 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6696 AssertRC(rc2);
6697 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6698 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6699 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6700 }
6701
6702 /*
6703 * Check if the physical page info needs updating.
6704 */
6705 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6706# ifdef IN_RING3
6707 pbMem = pTlbe->pbMappingR3;
6708# else
6709 pbMem = NULL;
6710# endif
6711 else
6712 {
6713 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6714 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6716 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6717 pTlbe->pbMappingR3 = NULL;
6718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6721 &pbMem, &pTlbe->fFlagsAndPhysRev);
6722 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6723# ifdef IN_RING3
6724 pTlbe->pbMappingR3 = pbMem;
6725# endif
6726 }
6727
6728 /*
6729 * Check the physical page level access and mapping.
6730 */
6731 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6732 { /* probably likely */ }
6733 else
6734 {
6735 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6736 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6737 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6738 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6739 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6740 if (rcStrict == VINF_SUCCESS)
6741 return pbMem;
6742 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6743 }
6744 }
6745 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6746
6747 if (pbMem)
6748 {
6749 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6750 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6751 fAccess |= IEM_ACCESS_NOT_LOCKED;
6752 }
6753 else
6754 {
6755 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6756 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6757 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6758 if (rcStrict == VINF_SUCCESS)
6759 return pbMem;
6760 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6761 }
6762
6763 void * const pvMem = pbMem;
6764
6765 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6766 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6767 if (fAccess & IEM_ACCESS_TYPE_READ)
6768 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6769
6770#else /* !IEM_WITH_DATA_TLB */
6771
6772
6773 RTGCPHYS GCPhysFirst;
6774 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6775 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6776 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6777
6778 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6779 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6780 if (fAccess & IEM_ACCESS_TYPE_READ)
6781 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6782
6783 void *pvMem;
6784 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6785 if (rcStrict == VINF_SUCCESS)
6786 { /* likely */ }
6787 else
6788 {
6789 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6790 if (rcStrict == VINF_SUCCESS)
6791 return pvMem;
6792 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6793 }
6794
6795#endif /* !IEM_WITH_DATA_TLB */
6796
6797 /*
6798 * Fill in the mapping table entry.
6799 */
6800 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6801 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6802 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6803 pVCpu->iem.s.cActiveMappings++;
6804
6805 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6806 return pvMem;
6807}
6808
6809
6810/**
6811 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6812 *
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param pvMem The mapping.
6815 * @param fAccess The kind of access.
6816 */
6817void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6818{
6819 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6820 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6821
6822 /* If it's bounce buffered, we may need to write back the buffer. */
6823 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6824 {
6825 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6826 {
6827 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6828 if (rcStrict == VINF_SUCCESS)
6829 return;
6830 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6831 }
6832 }
6833 /* Otherwise unlock it. */
6834 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6835 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6836
6837 /* Free the entry. */
6838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6839 Assert(pVCpu->iem.s.cActiveMappings != 0);
6840 pVCpu->iem.s.cActiveMappings--;
6841}
6842
6843#endif /* IEM_WITH_SETJMP */
6844
6845#ifndef IN_RING3
6846/**
6847 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6848 * buffer part shows trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
6849 *
6850 * Allows the instruction to be completed and retired, while the IEM user will
6851 * return to ring-3 immediately afterwards and do the postponed writes there.
6852 *
6853 * @returns VBox status code (no strict statuses). Caller must check
6854 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param pvMem The mapping.
6857 * @param fAccess The kind of access.
6858 */
6859VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6860{
6861 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6862 AssertReturn(iMemMap >= 0, iMemMap);
6863
6864 /* If it's bounce buffered, we may need to write back the buffer. */
6865 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6866 {
6867 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6868 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6869 }
6870 /* Otherwise unlock it. */
6871 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6872 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6873
6874 /* Free the entry. */
6875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6876 Assert(pVCpu->iem.s.cActiveMappings != 0);
6877 pVCpu->iem.s.cActiveMappings--;
6878 return VINF_SUCCESS;
6879}
6880#endif
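
/*
 * Illustration only (not built): the calling pattern the postpone variant above
 * expects outside ring-3.  After committing, the caller checks VMCPU_FF_IEM and
 * stops iterating (e.g. a REP string instruction), so the IEM user returns to
 * ring-3 where the postponed handler writes are performed.  VINF_EM_RAW_TO_R3
 * is used merely as a plausible "go to ring-3" status for this sketch.
 */
#if 0
static VBOXSTRICTRC iemExampleCommitInStringLoop(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, fAccess);
    if (   rcStrict == VINF_SUCCESS
        && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)) /* a write was postponed, don't repeat the instruction */
        rcStrict = VINF_EM_RAW_TO_R3;
    return rcStrict;
}
#endif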
6881
6882
6883/**
6884 * Rolls back mappings, releasing page locks and such.
6885 *
6886 * The caller shall only call this after checking cActiveMappings.
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 */
6890void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6891{
6892 Assert(pVCpu->iem.s.cActiveMappings > 0);
6893
6894 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6895 while (iMemMap-- > 0)
6896 {
6897 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6898 if (fAccess != IEM_ACCESS_INVALID)
6899 {
6900 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6902 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6903 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6904 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6905 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6906 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6907 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6908 pVCpu->iem.s.cActiveMappings--;
6909 }
6910 }
6911}
6912
6913
6914/**
6915 * Fetches a data byte.
6916 *
6917 * @returns Strict VBox status code.
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 * @param pu8Dst Where to return the byte.
6920 * @param iSegReg The index of the segment register to use for
6921 * this access. The base and limits are checked.
6922 * @param GCPtrMem The address of the guest memory.
6923 */
6924VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6925{
6926 /* The lazy approach for now... */
6927 uint8_t const *pu8Src;
6928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6929 if (rc == VINF_SUCCESS)
6930 {
6931 *pu8Dst = *pu8Src;
6932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6933 Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, *pu8Dst));
6934 }
6935 return rc;
6936}
6937
6938
6939#ifdef IEM_WITH_SETJMP
6940/**
6941 * Fetches a data byte, longjmp on error.
6942 *
6943 * @returns The byte.
6944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6945 * @param iSegReg The index of the segment register to use for
6946 * this access. The base and limits are checked.
6947 * @param GCPtrMem The address of the guest memory.
6948 */
6949uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6950{
6951 /* The lazy approach for now... */
6952 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6953 uint8_t const bRet = *pu8Src;
6954 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6955 Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, bRet));
6956 return bRet;
6957}
6958#endif /* IEM_WITH_SETJMP */
6959
6960
6961/**
6962 * Fetches a data word.
6963 *
6964 * @returns Strict VBox status code.
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pu16Dst Where to return the word.
6967 * @param iSegReg The index of the segment register to use for
6968 * this access. The base and limits are checked.
6969 * @param GCPtrMem The address of the guest memory.
6970 */
6971VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6972{
6973 /* The lazy approach for now... */
6974 uint16_t const *pu16Src;
6975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6976 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6977 if (rc == VINF_SUCCESS)
6978 {
6979 *pu16Dst = *pu16Src;
6980 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6981 Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, *pu16Dst));
6982 }
6983 return rc;
6984}
6985
6986
6987#ifdef IEM_WITH_SETJMP
6988/**
6989 * Fetches a data word, longjmp on error.
6990 *
6991 * @returns The word
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param iSegReg The index of the segment register to use for
6994 * this access. The base and limits are checked.
6995 * @param GCPtrMem The address of the guest memory.
6996 */
6997uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6998{
6999 /* The lazy approach for now... */
7000 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7001 sizeof(*pu16Src) - 1);
7002 uint16_t const u16Ret = *pu16Src;
7003 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7004 Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Ret));
7005 return u16Ret;
7006}
7007#endif
7008
7009
7010/**
7011 * Fetches a data dword.
7012 *
7013 * @returns Strict VBox status code.
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 * @param pu32Dst Where to return the dword.
7016 * @param iSegReg The index of the segment register to use for
7017 * this access. The base and limits are checked.
7018 * @param GCPtrMem The address of the guest memory.
7019 */
7020VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7021{
7022 /* The lazy approach for now... */
7023 uint32_t const *pu32Src;
7024 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7025 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7026 if (rc == VINF_SUCCESS)
7027 {
7028 *pu32Dst = *pu32Src;
7029 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7030 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, *pu32Dst));
7031 }
7032 return rc;
7033}
7034
7035
7036/**
7037 * Fetches a data dword and zero extends it to a qword.
7038 *
7039 * @returns Strict VBox status code.
7040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7041 * @param pu64Dst Where to return the qword.
7042 * @param iSegReg The index of the segment register to use for
7043 * this access. The base and limits are checked.
7044 * @param GCPtrMem The address of the guest memory.
7045 */
7046VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7047{
7048 /* The lazy approach for now... */
7049 uint32_t const *pu32Src;
7050 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7051 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7052 if (rc == VINF_SUCCESS)
7053 {
7054 *pu64Dst = *pu32Src;
7055 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7056 Log9(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7057 }
7058 return rc;
7059}
7060
7061
7062#ifdef IEM_WITH_SETJMP
7063
7064/**
7065 * Fetches a data dword, longjmp on error, fallback/safe version.
7066 *
7067 * @returns The dword
7068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7069 * @param iSegReg The index of the segment register to use for
7070 * this access. The base and limits are checked.
7071 * @param GCPtrMem The address of the guest memory.
7072 */
7073uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7074{
7075 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7076 sizeof(*pu32Src) - 1);
7077 uint32_t const u32Ret = *pu32Src;
7078 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7079 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7080 return u32Ret;
7081}
7082
7083
7084/**
7085 * Fetches a data dword, longjmp on error.
7086 *
7087 * @returns The dword
7088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7089 * @param iSegReg The index of the segment register to use for
7090 * this access. The base and limits are checked.
7091 * @param GCPtrMem The address of the guest memory.
7092 */
7093uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7094{
7095# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7096 /*
7097 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7098 */
7099 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7100 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7101 {
7102 /*
7103 * TLB lookup.
7104 */
7105 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7106 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7107 if (pTlbe->uTag == uTag)
7108 {
7109 /*
7110 * Check TLB page table level access flags.
7111 */
7112 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7113 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7114 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7115 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7116 {
7117 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7118
7119 /*
7120 * Alignment check:
7121 */
7122 /** @todo check priority \#AC vs \#PF */
7123 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7124 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7125 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7126 || IEM_GET_CPL(pVCpu) != 3)
7127 {
7128 /*
7129 * Fetch and return the dword
7130 */
7131 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7132 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7133 uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7134 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7135 return u32Ret;
7136 }
7137 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7138 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7139 }
7140 }
7141 }
7142
7143 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7144 outdated page pointer, or other troubles. */
7145 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7146 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7147
7148# else
7149 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7150 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7151 uint32_t const u32Ret = *pu32Src;
7152 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7153 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7154 return u32Ret;
7155# endif
7156}
7157#endif
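
/*
 * Illustration only (not built): the alignment-check gate used in the TLB fast
 * path above, written out as a standalone predicate.  #AC is only raised for a
 * misaligned data access when CR0.AM and EFLAGS.AC are both set and the access
 * is made at CPL 3 (CR0.AM and EFLAGS.AC happen to share bit 18).
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_X86_CR0_AM  UINT64_C(0x00040000)   /* CR0.AM, bit 18 */
# define EXAMPLE_X86_EFL_AC  UINT32_C(0x00040000)   /* EFLAGS.AC, bit 18 */

static bool exampleMustRaiseAlignmentCheck(uint64_t GCPtrEff, uint32_t cbAccess,
                                           uint64_t uCr0, uint32_t fEfl, uint8_t uCpl)
{
    return (GCPtrEff & (cbAccess - 1)) != 0     /* misaligned (cbAccess is a power of two) */
        && (uCr0 & EXAMPLE_X86_CR0_AM)
        && (fEfl & EXAMPLE_X86_EFL_AC)
        && uCpl == 3;
}
#endif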
7158
7159
7160#ifdef SOME_UNUSED_FUNCTION
7161/**
7162 * Fetches a data dword and sign extends it to a qword.
7163 *
7164 * @returns Strict VBox status code.
7165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7166 * @param pu64Dst Where to return the sign extended value.
7167 * @param iSegReg The index of the segment register to use for
7168 * this access. The base and limits are checked.
7169 * @param GCPtrMem The address of the guest memory.
7170 */
7171VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7172{
7173 /* The lazy approach for now... */
7174 int32_t const *pi32Src;
7175 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7176 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7177 if (rc == VINF_SUCCESS)
7178 {
7179 *pu64Dst = *pi32Src;
7180 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7181 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7182 }
7183#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7184 else
7185 *pu64Dst = 0;
7186#endif
7187 return rc;
7188}
7189#endif
7190
7191
7192/**
7193 * Fetches a data qword.
7194 *
7195 * @returns Strict VBox status code.
7196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7197 * @param pu64Dst Where to return the qword.
7198 * @param iSegReg The index of the segment register to use for
7199 * this access. The base and limits are checked.
7200 * @param GCPtrMem The address of the guest memory.
7201 */
7202VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7203{
7204 /* The lazy approach for now... */
7205 uint64_t const *pu64Src;
7206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7207 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7208 if (rc == VINF_SUCCESS)
7209 {
7210 *pu64Dst = *pu64Src;
7211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7212 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7213 }
7214 return rc;
7215}
7216
7217
7218#ifdef IEM_WITH_SETJMP
7219/**
7220 * Fetches a data qword, longjmp on error.
7221 *
7222 * @returns The qword.
7223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7224 * @param iSegReg The index of the segment register to use for
7225 * this access. The base and limits are checked.
7226 * @param GCPtrMem The address of the guest memory.
7227 */
7228uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7229{
7230 /* The lazy approach for now... */
7231 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7232 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7233 uint64_t const u64Ret = *pu64Src;
7234 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7235 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
7236 return u64Ret;
7237}
7238#endif
7239
7240
7241/**
7242 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7243 *
7244 * @returns Strict VBox status code.
7245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7246 * @param pu64Dst Where to return the qword.
7247 * @param iSegReg The index of the segment register to use for
7248 * this access. The base and limits are checked.
7249 * @param GCPtrMem The address of the guest memory.
7250 */
7251VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7252{
7253 /* The lazy approach for now... */
7254 uint64_t const *pu64Src;
7255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7256 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7257 if (rc == VINF_SUCCESS)
7258 {
7259 *pu64Dst = *pu64Src;
7260 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7261 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7262 }
7263 return rc;
7264}
7265
7266
7267#ifdef IEM_WITH_SETJMP
7268/**
7269 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7270 *
7271 * @returns The qword.
7272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7273 * @param iSegReg The index of the segment register to use for
7274 * this access. The base and limits are checked.
7275 * @param GCPtrMem The address of the guest memory.
7276 */
7277uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7278{
7279 /* The lazy approach for now... */
7280 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7281 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7282 uint64_t const u64Ret = *pu64Src;
7283 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7284 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
7285 return u64Ret;
7286}
7287#endif
7288
7289
7290/**
7291 * Fetches a data tword.
7292 *
7293 * @returns Strict VBox status code.
7294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7295 * @param pr80Dst Where to return the tword.
7296 * @param iSegReg The index of the segment register to use for
7297 * this access. The base and limits are checked.
7298 * @param GCPtrMem The address of the guest memory.
7299 */
7300VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7301{
7302 /* The lazy approach for now... */
7303 PCRTFLOAT80U pr80Src;
7304 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7305 if (rc == VINF_SUCCESS)
7306 {
7307 *pr80Dst = *pr80Src;
7308 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7309 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7310 }
7311 return rc;
7312}
7313
7314
7315#ifdef IEM_WITH_SETJMP
7316/**
7317 * Fetches a data tword, longjmp on error.
7318 *
7319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7320 * @param pr80Dst Where to return the tword.
7321 * @param iSegReg The index of the segment register to use for
7322 * this access. The base and limits are checked.
7323 * @param GCPtrMem The address of the guest memory.
7324 */
7325void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7326{
7327 /* The lazy approach for now... */
7328 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7329 *pr80Dst = *pr80Src;
7330 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7331 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7332}
7333#endif
7334
7335
7336/**
7337 * Fetches a data decimal tword.
7338 *
7339 * @returns Strict VBox status code.
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param pd80Dst Where to return the tword.
7342 * @param iSegReg The index of the segment register to use for
7343 * this access. The base and limits are checked.
7344 * @param GCPtrMem The address of the guest memory.
7345 */
7346VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7347{
7348 /* The lazy approach for now... */
7349 PCRTPBCD80U pd80Src;
7350 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7351 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7352 if (rc == VINF_SUCCESS)
7353 {
7354 *pd80Dst = *pd80Src;
7355 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7356 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7357 }
7358 return rc;
7359}
7360
7361
7362#ifdef IEM_WITH_SETJMP
7363/**
7364 * Fetches a data decimal tword, longjmp on error.
7365 *
7366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7367 * @param pd80Dst Where to return the tword.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 */
7372void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7373{
7374 /* The lazy approach for now... */
7375 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7376 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7377 *pd80Dst = *pd80Src;
7378 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7379 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7380}
7381#endif
7382
7383
7384/**
7385 * Fetches a data dqword (double qword), generally SSE related.
7386 *
7387 * @returns Strict VBox status code.
7388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7389 * @param pu128Dst Where to return the dqword.
7390 * @param iSegReg The index of the segment register to use for
7391 * this access. The base and limits are checked.
7392 * @param GCPtrMem The address of the guest memory.
7393 */
7394VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7395{
7396 /* The lazy approach for now... */
7397 PCRTUINT128U pu128Src;
7398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7399 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7400 if (rc == VINF_SUCCESS)
7401 {
7402 pu128Dst->au64[0] = pu128Src->au64[0];
7403 pu128Dst->au64[1] = pu128Src->au64[1];
7404 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7405 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7406 }
7407 return rc;
7408}
7409
7410
7411#ifdef IEM_WITH_SETJMP
7412/**
7413 * Fetches a data dqword (double qword), generally SSE related.
7414 *
7415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7416 * @param pu128Dst Where to return the dqword.
7417 * @param iSegReg The index of the segment register to use for
7418 * this access. The base and limits are checked.
7419 * @param GCPtrMem The address of the guest memory.
7420 */
7421void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7422{
7423 /* The lazy approach for now... */
7424 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7425 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7426 pu128Dst->au64[0] = pu128Src->au64[0];
7427 pu128Dst->au64[1] = pu128Src->au64[1];
7428 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7429 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7430}
7431#endif
7432
7433
7434/**
7435 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7436 * related.
7437 *
7438 * Raises \#GP(0) if not aligned.
7439 *
7440 * @returns Strict VBox status code.
7441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7442 * @param pu128Dst Where to return the dqword.
7443 * @param iSegReg The index of the segment register to use for
7444 * this access. The base and limits are checked.
7445 * @param GCPtrMem The address of the guest memory.
7446 */
7447VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7448{
7449 /* The lazy approach for now... */
7450 PCRTUINT128U pu128Src;
7451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7452 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7453 if (rc == VINF_SUCCESS)
7454 {
7455 pu128Dst->au64[0] = pu128Src->au64[0];
7456 pu128Dst->au64[1] = pu128Src->au64[1];
7457 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7458 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7459 }
7460 return rc;
7461}
7462
7463
7464#ifdef IEM_WITH_SETJMP
7465/**
7466 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7467 * related, longjmp on error.
7468 *
7469 * Raises \#GP(0) if not aligned.
7470 *
7471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7472 * @param pu128Dst Where to return the dqword.
7473 * @param iSegReg The index of the segment register to use for
7474 * this access. The base and limits are checked.
7475 * @param GCPtrMem The address of the guest memory.
7476 */
7477void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7478 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7479{
7480 /* The lazy approach for now... */
7481 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7482 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7483 pu128Dst->au64[0] = pu128Src->au64[0];
7484 pu128Dst->au64[1] = pu128Src->au64[1];
7485 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7486 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7487}
7488#endif
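
/*
 * Illustrative sketch (not part of the original source): how an instruction
 * body might choose between the unaligned and the SSE-aligned 128-bit fetch
 * helpers above.  The helper name and the fMovAligned flag are invented for
 * the example; the real decoder simply picks the right helper per opcode.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleFetchXmmMemOperand(PVMCPUCC pVCpu, PRTUINT128U pu128Dst,
                                                 uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, bool fMovAligned)
{
    if (fMovAligned) /* MOVAPS-style: a misaligned address must raise #GP(0). */
        return iemMemFetchDataU128AlignedSse(pVCpu, pu128Dst, iEffSeg, GCPtrEffSrc);
    return iemMemFetchDataU128(pVCpu, pu128Dst, iEffSeg, GCPtrEffSrc); /* MOVUPS-style: any alignment. */
}
#endif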
7489
7490
7491/**
7492 * Fetches a data qqword (quad qword), generally AVX related.
7493 *
7494 * @returns Strict VBox status code.
7495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7496 * @param pu256Dst Where to return the qqword.
7497 * @param iSegReg The index of the segment register to use for
7498 * this access. The base and limits are checked.
7499 * @param GCPtrMem The address of the guest memory.
7500 */
7501VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7502{
7503 /* The lazy approach for now... */
7504 PCRTUINT256U pu256Src;
7505 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7506 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7507 if (rc == VINF_SUCCESS)
7508 {
7509 pu256Dst->au64[0] = pu256Src->au64[0];
7510 pu256Dst->au64[1] = pu256Src->au64[1];
7511 pu256Dst->au64[2] = pu256Src->au64[2];
7512 pu256Dst->au64[3] = pu256Src->au64[3];
7513 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7514 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7515 }
7516 return rc;
7517}
7518
7519
7520#ifdef IEM_WITH_SETJMP
7521/**
7522 * Fetches a data qqword (quad qword), generally AVX related.
7523 *
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param pu256Dst Where to return the qqword.
7526 * @param iSegReg The index of the segment register to use for
7527 * this access. The base and limits are checked.
7528 * @param GCPtrMem The address of the guest memory.
7529 */
7530void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7531{
7532 /* The lazy approach for now... */
7533 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7534 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7535 pu256Dst->au64[0] = pu256Src->au64[0];
7536 pu256Dst->au64[1] = pu256Src->au64[1];
7537 pu256Dst->au64[2] = pu256Src->au64[2];
7538 pu256Dst->au64[3] = pu256Src->au64[3];
7539 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7540 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7541}
7542#endif
7543
7544
7545/**
7546 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7547 * related.
7548 *
7549 * Raises \#GP(0) if not aligned.
7550 *
7551 * @returns Strict VBox status code.
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 * @param pu256Dst Where to return the qqword.
7554 * @param iSegReg The index of the segment register to use for
7555 * this access. The base and limits are checked.
7556 * @param GCPtrMem The address of the guest memory.
7557 */
7558VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7559{
7560 /* The lazy approach for now... */
7561 PCRTUINT256U pu256Src;
7562 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7563 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7564 if (rc == VINF_SUCCESS)
7565 {
7566 pu256Dst->au64[0] = pu256Src->au64[0];
7567 pu256Dst->au64[1] = pu256Src->au64[1];
7568 pu256Dst->au64[2] = pu256Src->au64[2];
7569 pu256Dst->au64[3] = pu256Src->au64[3];
7570 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7571 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7572 }
7573 return rc;
7574}
7575
7576
7577#ifdef IEM_WITH_SETJMP
7578/**
7579 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7580 * related, longjmp on error.
7581 *
7582 * Raises \#GP(0) if not aligned.
7583 *
7584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7585 * @param pu256Dst Where to return the qqword.
7586 * @param iSegReg The index of the segment register to use for
7587 * this access. The base and limits are checked.
7588 * @param GCPtrMem The address of the guest memory.
7589 */
7590void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7591 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7592{
7593 /* The lazy approach for now... */
7594 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7595 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7596 pu256Dst->au64[0] = pu256Src->au64[0];
7597 pu256Dst->au64[1] = pu256Src->au64[1];
7598 pu256Dst->au64[2] = pu256Src->au64[2];
7599 pu256Dst->au64[3] = pu256Src->au64[3];
7600 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7601 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7602}
7603#endif
7604
7605
7606
7607/**
7608 * Fetches a descriptor register (lgdt, lidt).
7609 *
7610 * @returns Strict VBox status code.
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param pcbLimit Where to return the limit.
7613 * @param pGCPtrBase Where to return the base.
7614 * @param iSegReg The index of the segment register to use for
7615 * this access. The base and limits are checked.
7616 * @param GCPtrMem The address of the guest memory.
7617 * @param enmOpSize The effective operand size.
7618 */
7619VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7620 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7621{
7622 /*
7623 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7624 * little special:
7625 * - The two reads are done separately.
7626 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7627 * - We suspect the 386 to actually commit the limit before the base in
7628 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7629 * don't try to emulate this eccentric behavior, because it's not well
7630 * enough understood and rather hard to trigger.
7631 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7632 */
7633 VBOXSTRICTRC rcStrict;
7634 if (IEM_IS_64BIT_CODE(pVCpu))
7635 {
7636 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7637 if (rcStrict == VINF_SUCCESS)
7638 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7639 }
7640 else
7641 {
7642 uint32_t uTmp = 0; /* (Keeps Visual C++ from warning about possibly uninitialized use.) */
7643 if (enmOpSize == IEMMODE_32BIT)
7644 {
7645 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7646 {
7647 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7648 if (rcStrict == VINF_SUCCESS)
7649 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7650 }
7651 else
7652 {
7653 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7654 if (rcStrict == VINF_SUCCESS)
7655 {
7656 *pcbLimit = (uint16_t)uTmp;
7657 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7658 }
7659 }
7660 if (rcStrict == VINF_SUCCESS)
7661 *pGCPtrBase = uTmp;
7662 }
7663 else
7664 {
7665 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7666 if (rcStrict == VINF_SUCCESS)
7667 {
7668 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7669 if (rcStrict == VINF_SUCCESS)
7670 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7671 }
7672 }
7673 }
7674 return rcStrict;
7675}
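
/*
 * Illustrative sketch (not part of the original source): roughly how an
 * LGDT-style caller could use iemMemFetchDataXdtr.  The helper name and the
 * direct GDTR assignment are assumptions for illustration only; the real
 * LGDT implementation performs additional privilege and canonicality checks.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleLoadGdtr(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, IEMMODE enmEffOpSize)
{
    uint16_t cbLimit   = 0;
    RTGCPTR  GCPtrBase = 0;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        /* Only commit the new GDTR once both memory reads have succeeded. */
        pVCpu->cpum.GstCtx.gdtr.cbGdt = cbLimit;
        pVCpu->cpum.GstCtx.gdtr.pGdt  = GCPtrBase;
    }
    return rcStrict;
}
#endif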
7676
7677
7678
7679/**
7680 * Stores a data byte.
7681 *
7682 * @returns Strict VBox status code.
7683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7684 * @param iSegReg The index of the segment register to use for
7685 * this access. The base and limits are checked.
7686 * @param GCPtrMem The address of the guest memory.
7687 * @param u8Value The value to store.
7688 */
7689VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7690{
7691 /* The lazy approach for now... */
7692 uint8_t *pu8Dst;
7693 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7694 if (rc == VINF_SUCCESS)
7695 {
7696 *pu8Dst = u8Value;
7697 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7698 Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
7699 }
7700 return rc;
7701}
7702
7703
7704#ifdef IEM_WITH_SETJMP
7705/**
7706 * Stores a data byte, longjmp on error.
7707 *
7708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7709 * @param iSegReg The index of the segment register to use for
7710 * this access. The base and limits are checked.
7711 * @param GCPtrMem The address of the guest memory.
7712 * @param u8Value The value to store.
7713 */
7714void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7715{
7716 /* The lazy approach for now... */
7717 Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
7718 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7719 *pu8Dst = u8Value;
7720 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7721}
7722#endif
7723
7724
7725/**
7726 * Stores a data word.
7727 *
7728 * @returns Strict VBox status code.
7729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7730 * @param iSegReg The index of the segment register to use for
7731 * this access. The base and limits are checked.
7732 * @param GCPtrMem The address of the guest memory.
7733 * @param u16Value The value to store.
7734 */
7735VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7736{
7737 /* The lazy approach for now... */
7738 uint16_t *pu16Dst;
7739 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7740 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7741 if (rc == VINF_SUCCESS)
7742 {
7743 *pu16Dst = u16Value;
7744 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7745 Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
7746 }
7747 return rc;
7748}
7749
7750
7751#ifdef IEM_WITH_SETJMP
7752/**
7753 * Stores a data word, longjmp on error.
7754 *
7755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7756 * @param iSegReg The index of the segment register to use for
7757 * this access. The base and limits are checked.
7758 * @param GCPtrMem The address of the guest memory.
7759 * @param u16Value The value to store.
7760 */
7761void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7762{
7763 /* The lazy approach for now... */
7764 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7765 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7766 *pu16Dst = u16Value;
7767 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7768 Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
7769}
7770#endif
7771
7772
7773/**
7774 * Stores a data dword.
7775 *
7776 * @returns Strict VBox status code.
7777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7778 * @param iSegReg The index of the segment register to use for
7779 * this access. The base and limits are checked.
7780 * @param GCPtrMem The address of the guest memory.
7781 * @param u32Value The value to store.
7782 */
7783VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7784{
7785 /* The lazy approach for now... */
7786 uint32_t *pu32Dst;
7787 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7788 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7789 if (rc == VINF_SUCCESS)
7790 {
7791 *pu32Dst = u32Value;
7792 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7793 Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
7794 }
7795 return rc;
7796}
7797
7798
7799#ifdef IEM_WITH_SETJMP
7800/**
7801 * Stores a data dword, longjmp on error.
7802 *
7803 * @returns Strict VBox status code.
7804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7805 * @param iSegReg The index of the segment register to use for
7806 * this access. The base and limits are checked.
7807 * @param GCPtrMem The address of the guest memory.
7808 * @param u32Value The value to store.
7809 */
7810void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7811{
7812 /* The lazy approach for now... */
7813 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7814 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7815 *pu32Dst = u32Value;
7816 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7817 Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
7818}
7819#endif
7820
7821
7822/**
7823 * Stores a data qword.
7824 *
7825 * @returns Strict VBox status code.
7826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7827 * @param iSegReg The index of the segment register to use for
7828 * this access. The base and limits are checked.
7829 * @param GCPtrMem The address of the guest memory.
7830 * @param u64Value The value to store.
7831 */
7832VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7833{
7834 /* The lazy approach for now... */
7835 uint64_t *pu64Dst;
7836 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7837 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7838 if (rc == VINF_SUCCESS)
7839 {
7840 *pu64Dst = u64Value;
7841 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7842 Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
7843 }
7844 return rc;
7845}
7846
7847
7848#ifdef IEM_WITH_SETJMP
7849/**
7850 * Stores a data qword, longjmp on error.
7851 *
7852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7853 * @param iSegReg The index of the segment register to use for
7854 * this access. The base and limits are checked.
7855 * @param GCPtrMem The address of the guest memory.
7856 * @param u64Value The value to store.
7857 */
7858void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7859{
7860 /* The lazy approach for now... */
7861 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7862 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7863 *pu64Dst = u64Value;
7864 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7865 Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
7866}
7867#endif
7868
7869
7870/**
7871 * Stores a data dqword.
7872 *
7873 * @returns Strict VBox status code.
7874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7875 * @param iSegReg The index of the segment register to use for
7876 * this access. The base and limits are checked.
7877 * @param GCPtrMem The address of the guest memory.
7878 * @param u128Value The value to store.
7879 */
7880VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7881{
7882 /* The lazy approach for now... */
7883 PRTUINT128U pu128Dst;
7884 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7885 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7886 if (rc == VINF_SUCCESS)
7887 {
7888 pu128Dst->au64[0] = u128Value.au64[0];
7889 pu128Dst->au64[1] = u128Value.au64[1];
7890 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7891 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7892 }
7893 return rc;
7894}
7895
7896
7897#ifdef IEM_WITH_SETJMP
7898/**
7899 * Stores a data dqword, longjmp on error.
7900 *
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param iSegReg The index of the segment register to use for
7903 * this access. The base and limits are checked.
7904 * @param GCPtrMem The address of the guest memory.
7905 * @param u128Value The value to store.
7906 */
7907void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7908{
7909 /* The lazy approach for now... */
7910 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7911 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7912 pu128Dst->au64[0] = u128Value.au64[0];
7913 pu128Dst->au64[1] = u128Value.au64[1];
7914 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7915 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7916}
7917#endif
7918
7919
7920/**
7921 * Stores a data dqword, SSE aligned.
7922 *
7923 * @returns Strict VBox status code.
7924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7925 * @param iSegReg The index of the segment register to use for
7926 * this access. The base and limits are checked.
7927 * @param GCPtrMem The address of the guest memory.
7928 * @param u128Value The value to store.
7929 */
7930VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7931{
7932 /* The lazy approach for now... */
7933 PRTUINT128U pu128Dst;
7934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7935 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7936 if (rc == VINF_SUCCESS)
7937 {
7938 pu128Dst->au64[0] = u128Value.au64[0];
7939 pu128Dst->au64[1] = u128Value.au64[1];
7940 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7941 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7942 }
7943 return rc;
7944}
7945
7946
7947#ifdef IEM_WITH_SETJMP
7948/**
7949 * Stores a data dqword, SSE aligned, longjmp on error.
7950 *
7951 * @returns Strict VBox status code.
7952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7953 * @param iSegReg The index of the segment register to use for
7954 * this access. The base and limits are checked.
7955 * @param GCPtrMem The address of the guest memory.
7956 * @param u128Value The value to store.
7957 */
7958void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7959 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7960{
7961 /* The lazy approach for now... */
7962 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7963 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7964 pu128Dst->au64[0] = u128Value.au64[0];
7965 pu128Dst->au64[1] = u128Value.au64[1];
7966 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7967 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7968}
7969#endif
7970
7971
7972/**
7973 * Stores a data qqword.
7974 *
7975 * @returns Strict VBox status code.
7976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7977 * @param iSegReg The index of the segment register to use for
7978 * this access. The base and limits are checked.
7979 * @param GCPtrMem The address of the guest memory.
7980 * @param pu256Value Pointer to the value to store.
7981 */
7982VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7983{
7984 /* The lazy approach for now... */
7985 PRTUINT256U pu256Dst;
7986 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7987 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7988 if (rc == VINF_SUCCESS)
7989 {
7990 pu256Dst->au64[0] = pu256Value->au64[0];
7991 pu256Dst->au64[1] = pu256Value->au64[1];
7992 pu256Dst->au64[2] = pu256Value->au64[2];
7993 pu256Dst->au64[3] = pu256Value->au64[3];
7994 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7995 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7996 }
7997 return rc;
7998}
7999
8000
8001#ifdef IEM_WITH_SETJMP
8002/**
8003 * Stores a data qqword, longjmp on error.
8004 *
8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8006 * @param iSegReg The index of the segment register to use for
8007 * this access. The base and limits are checked.
8008 * @param GCPtrMem The address of the guest memory.
8009 * @param pu256Value Pointer to the value to store.
8010 */
8011void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8012{
8013 /* The lazy approach for now... */
8014 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8015 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8016 pu256Dst->au64[0] = pu256Value->au64[0];
8017 pu256Dst->au64[1] = pu256Value->au64[1];
8018 pu256Dst->au64[2] = pu256Value->au64[2];
8019 pu256Dst->au64[3] = pu256Value->au64[3];
8020 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8021 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8022}
8023#endif
8024
8025
8026/**
8027 * Stores a data qqword, AVX \#GP(0) aligned.
8028 *
8029 * @returns Strict VBox status code.
8030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8031 * @param iSegReg The index of the segment register to use for
8032 * this access. The base and limits are checked.
8033 * @param GCPtrMem The address of the guest memory.
8034 * @param pu256Value Pointer to the value to store.
8035 */
8036VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8037{
8038 /* The lazy approach for now... */
8039 PRTUINT256U pu256Dst;
8040 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8041 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8042 if (rc == VINF_SUCCESS)
8043 {
8044 pu256Dst->au64[0] = pu256Value->au64[0];
8045 pu256Dst->au64[1] = pu256Value->au64[1];
8046 pu256Dst->au64[2] = pu256Value->au64[2];
8047 pu256Dst->au64[3] = pu256Value->au64[3];
8048 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8049 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8050 }
8051 return rc;
8052}
8053
8054
8055#ifdef IEM_WITH_SETJMP
8056/**
8057 * Stores a data qqword, AVX aligned, longjmp on error.
8058 *
8059 * @returns Strict VBox status code.
8060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8061 * @param iSegReg The index of the segment register to use for
8062 * this access. The base and limits are checked.
8063 * @param GCPtrMem The address of the guest memory.
8064 * @param pu256Value Pointer to the value to store.
8065 */
8066void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8067 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8068{
8069 /* The lazy approach for now... */
8070 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8071 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8072 pu256Dst->au64[0] = pu256Value->au64[0];
8073 pu256Dst->au64[1] = pu256Value->au64[1];
8074 pu256Dst->au64[2] = pu256Value->au64[2];
8075 pu256Dst->au64[3] = pu256Value->au64[3];
8076 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8077 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8078}
8079#endif
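
/*
 * Illustrative sketch (not part of the original source): how a VMOVAPS/VMOVUPS
 * style store would pick between the AVX-aligned and unaligned 256-bit store
 * helpers above.  The helper name and the fMovAligned flag are invented for
 * the example.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleStoreYmmMemOperand(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffDst,
                                                 PCRTUINT256U pu256Src, bool fMovAligned)
{
    if (fMovAligned) /* VMOVAPS-style: a misaligned address must raise #GP(0). */
        return iemMemStoreDataU256AlignedAvx(pVCpu, iEffSeg, GCPtrEffDst, pu256Src);
    return iemMemStoreDataU256(pVCpu, iEffSeg, GCPtrEffDst, pu256Src); /* VMOVUPS-style: any alignment. */
}
#endif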
8080
8081
8082/**
8083 * Stores a descriptor register (sgdt, sidt).
8084 *
8085 * @returns Strict VBox status code.
8086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8087 * @param cbLimit The limit.
8088 * @param GCPtrBase The base address.
8089 * @param iSegReg The index of the segment register to use for
8090 * this access. The base and limits are checked.
8091 * @param GCPtrMem The address of the guest memory.
8092 */
8093VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8094{
8095 /*
8096 * The SIDT and SGDT instructions actually store the data using two
8097 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8098 * do not respond to opsize prefixes.
8099 */
8100 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8101 if (rcStrict == VINF_SUCCESS)
8102 {
8103 if (IEM_IS_16BIT_CODE(pVCpu))
8104 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8105 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8106 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8107 else if (IEM_IS_32BIT_CODE(pVCpu))
8108 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8109 else
8110 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8111 }
8112 return rcStrict;
8113}
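
/*
 * Illustrative sketch (not part of the original source): an SGDT-style caller
 * would simply hand the current GDTR limit and base to the two-write helper
 * above.  The helper name is invented and it assumes the GDTR fields have
 * already been imported into the guest context.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleSgdt(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffDst)
{
    return iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt,
                               iEffSeg, GCPtrEffDst);
}
#endif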
8114
8115
8116/**
8117 * Pushes a word onto the stack.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8121 * @param u16Value The value to push.
8122 */
8123VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8124{
8125 /* Decrement the stack pointer. */
8126 uint64_t uNewRsp;
8127 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8128
8129 /* Write the word the lazy way. */
8130 uint16_t *pu16Dst;
8131 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8132 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8133 if (rc == VINF_SUCCESS)
8134 {
8135 *pu16Dst = u16Value;
8136 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8137 }
8138
8139 /* Commit the new RSP value unless an access handler made trouble. */
8140 if (rc == VINF_SUCCESS)
8141 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8142
8143 return rc;
8144}
8145
8146
8147/**
8148 * Pushes a dword onto the stack.
8149 *
8150 * @returns Strict VBox status code.
8151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8152 * @param u32Value The value to push.
8153 */
8154VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8155{
8156 /* Decrement the stack pointer. */
8157 uint64_t uNewRsp;
8158 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8159
8160 /* Write the dword the lazy way. */
8161 uint32_t *pu32Dst;
8162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8163 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8164 if (rc == VINF_SUCCESS)
8165 {
8166 *pu32Dst = u32Value;
8167 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8168 }
8169
8170 /* Commit the new RSP value unless an access handler made trouble. */
8171 if (rc == VINF_SUCCESS)
8172 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8173
8174 return rc;
8175}
8176
8177
8178/**
8179 * Pushes a dword segment register value onto the stack.
8180 *
8181 * @returns Strict VBox status code.
8182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8183 * @param u32Value The value to push.
8184 */
8185VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8186{
8187 /* Decrement the stack pointer. */
8188 uint64_t uNewRsp;
8189 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8190
8191 /* The Intel docs talk about zero extending the selector register
8192 value. My actual Intel CPU here might be zero extending the value
8193 but it still only writes the lower word... */
8194 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8195 * happens when crossing an electric page boundary, is the high word checked
8196 * for write accessibility or not? Probably it is. What about segment limits?
8197 * It appears this behavior is also shared with trap error codes.
8198 *
8199 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8200 * ancient hardware when it actually did change. */
8201 uint16_t *pu16Dst;
8202 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8203 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8204 if (rc == VINF_SUCCESS)
8205 {
8206 *pu16Dst = (uint16_t)u32Value;
8207 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8208 }
8209
8210 /* Commit the new RSP value unless an access handler made trouble. */
8211 if (rc == VINF_SUCCESS)
8212 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8213
8214 return rc;
8215}
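
/*
 * Illustrative sketch (not part of the original source): a PUSH FS style
 * instruction body in 32-bit code would simply do the following; only the
 * low word of the 4-byte stack slot is actually written, as described above.
 * (Assumes a pVCpu in scope, as in the functions above.)
 */
#if 0 /* example only */
VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, pVCpu->cpum.GstCtx.fs.Sel);
#endif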
8216
8217
8218/**
8219 * Pushes a qword onto the stack.
8220 *
8221 * @returns Strict VBox status code.
8222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8223 * @param u64Value The value to push.
8224 */
8225VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8226{
8227 /* Decrement the stack pointer. */
8228 uint64_t uNewRsp;
8229 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8230
8231 /* Write the qword the lazy way. */
8232 uint64_t *pu64Dst;
8233 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8234 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8235 if (rc == VINF_SUCCESS)
8236 {
8237 *pu64Dst = u64Value;
8238 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8239 }
8240
8241 /* Commit the new RSP value unless an access handler made trouble. */
8242 if (rc == VINF_SUCCESS)
8243 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8244
8245 return rc;
8246}
8247
8248
8249/**
8250 * Pops a word from the stack.
8251 *
8252 * @returns Strict VBox status code.
8253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8254 * @param pu16Value Where to store the popped value.
8255 */
8256VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8257{
8258 /* Increment the stack pointer. */
8259 uint64_t uNewRsp;
8260 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8261
8262 /* Read the word the lazy way. */
8263 uint16_t const *pu16Src;
8264 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8265 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8266 if (rc == VINF_SUCCESS)
8267 {
8268 *pu16Value = *pu16Src;
8269 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8270
8271 /* Commit the new RSP value. */
8272 if (rc == VINF_SUCCESS)
8273 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8274 }
8275
8276 return rc;
8277}
8278
8279
8280/**
8281 * Pops a dword from the stack.
8282 *
8283 * @returns Strict VBox status code.
8284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8285 * @param pu32Value Where to store the popped value.
8286 */
8287VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8288{
8289 /* Increment the stack pointer. */
8290 uint64_t uNewRsp;
8291 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8292
8293 /* Read the dword the lazy way. */
8294 uint32_t const *pu32Src;
8295 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8296 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8297 if (rc == VINF_SUCCESS)
8298 {
8299 *pu32Value = *pu32Src;
8300 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8301
8302 /* Commit the new RSP value. */
8303 if (rc == VINF_SUCCESS)
8304 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8305 }
8306
8307 return rc;
8308}
8309
8310
8311/**
8312 * Pops a qword from the stack.
8313 *
8314 * @returns Strict VBox status code.
8315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8316 * @param pu64Value Where to store the popped value.
8317 */
8318VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8319{
8320 /* Increment the stack pointer. */
8321 uint64_t uNewRsp;
8322 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8323
8324 /* Read the qword the lazy way. */
8325 uint64_t const *pu64Src;
8326 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8327 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8328 if (rc == VINF_SUCCESS)
8329 {
8330 *pu64Value = *pu64Src;
8331 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8332
8333 /* Commit the new RSP value. */
8334 if (rc == VINF_SUCCESS)
8335 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8336 }
8337
8338 return rc;
8339}
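
/*
 * Illustrative sketch (not part of the original source): the pop helpers
 * return the value through a local so the caller can defer the architectural
 * register update until the memory access has succeeded, e.g. for a POP RAX
 * style instruction.  (Assumes a pVCpu in scope, as in the functions above.)
 */
#if 0 /* example only */
uint64_t uValue = 0;
VBOXSTRICTRC rcStrict = iemMemStackPopU64(pVCpu, &uValue);
if (rcStrict == VINF_SUCCESS)
    pVCpu->cpum.GstCtx.rax = uValue; /* Commit the register only after the read succeeded. */
#endif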
8340
8341
8342/**
8343 * Pushes a word onto the stack, using a temporary stack pointer.
8344 *
8345 * @returns Strict VBox status code.
8346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8347 * @param u16Value The value to push.
8348 * @param pTmpRsp Pointer to the temporary stack pointer.
8349 */
8350VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8351{
8352 /* Decrement the stack pointer. */
8353 RTUINT64U NewRsp = *pTmpRsp;
8354 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8355
8356 /* Write the word the lazy way. */
8357 uint16_t *pu16Dst;
8358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8359 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8360 if (rc == VINF_SUCCESS)
8361 {
8362 *pu16Dst = u16Value;
8363 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8364 }
8365
8366 /* Commit the new RSP value unless an access handler made trouble. */
8367 if (rc == VINF_SUCCESS)
8368 *pTmpRsp = NewRsp;
8369
8370 return rc;
8371}
8372
8373
8374/**
8375 * Pushes a dword onto the stack, using a temporary stack pointer.
8376 *
8377 * @returns Strict VBox status code.
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param u32Value The value to push.
8380 * @param pTmpRsp Pointer to the temporary stack pointer.
8381 */
8382VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8383{
8384 /* Decrement the stack pointer. */
8385 RTUINT64U NewRsp = *pTmpRsp;
8386 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8387
8388 /* Write the dword the lazy way. */
8389 uint32_t *pu32Dst;
8390 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8391 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8392 if (rc == VINF_SUCCESS)
8393 {
8394 *pu32Dst = u32Value;
8395 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8396 }
8397
8398 /* Commit the new RSP value unless an access handler made trouble. */
8399 if (rc == VINF_SUCCESS)
8400 *pTmpRsp = NewRsp;
8401
8402 return rc;
8403}
8404
8405
8406/**
8407 * Pushes a qword onto the stack, using a temporary stack pointer.
8408 *
8409 * @returns Strict VBox status code.
8410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8411 * @param u64Value The value to push.
8412 * @param pTmpRsp Pointer to the temporary stack pointer.
8413 */
8414VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8415{
8416 /* Decrement the stack pointer. */
8417 RTUINT64U NewRsp = *pTmpRsp;
8418 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8419
8420 /* Write the qword the lazy way. */
8421 uint64_t *pu64Dst;
8422 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8423 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8424 if (rc == VINF_SUCCESS)
8425 {
8426 *pu64Dst = u64Value;
8427 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8428 }
8429
8430 /* Commit the new RSP value unless an access handler made trouble. */
8431 if (rc == VINF_SUCCESS)
8432 *pTmpRsp = NewRsp;
8433
8434 return rc;
8435}
8436
8437
8438/**
8439 * Pops a word from the stack, using a temporary stack pointer.
8440 *
8441 * @returns Strict VBox status code.
8442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8443 * @param pu16Value Where to store the popped value.
8444 * @param pTmpRsp Pointer to the temporary stack pointer.
8445 */
8446VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8447{
8448 /* Increment the stack pointer. */
8449 RTUINT64U NewRsp = *pTmpRsp;
8450 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8451
8452 /* Read the word the lazy way. */
8453 uint16_t const *pu16Src;
8454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8455 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8456 if (rc == VINF_SUCCESS)
8457 {
8458 *pu16Value = *pu16Src;
8459 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8460
8461 /* Commit the new RSP value. */
8462 if (rc == VINF_SUCCESS)
8463 *pTmpRsp = NewRsp;
8464 }
8465
8466 return rc;
8467}
8468
8469
8470/**
8471 * Pops a dword from the stack, using a temporary stack pointer.
8472 *
8473 * @returns Strict VBox status code.
8474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8475 * @param pu32Value Where to store the popped value.
8476 * @param pTmpRsp Pointer to the temporary stack pointer.
8477 */
8478VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8479{
8480 /* Increment the stack pointer. */
8481 RTUINT64U NewRsp = *pTmpRsp;
8482 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8483
8484 /* Read the dword the lazy way. */
8485 uint32_t const *pu32Src;
8486 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8487 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8488 if (rc == VINF_SUCCESS)
8489 {
8490 *pu32Value = *pu32Src;
8491 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8492
8493 /* Commit the new RSP value. */
8494 if (rc == VINF_SUCCESS)
8495 *pTmpRsp = NewRsp;
8496 }
8497
8498 return rc;
8499}
8500
8501
8502/**
8503 * Pops a qword from the stack, using a temporary stack pointer.
8504 *
8505 * @returns Strict VBox status code.
8506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8507 * @param pu64Value Where to store the popped value.
8508 * @param pTmpRsp Pointer to the temporary stack pointer.
8509 */
8510VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8511{
8512 /* Increment the stack pointer. */
8513 RTUINT64U NewRsp = *pTmpRsp;
8514 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8515
8516 /* Read the qword the lazy way. */
8517 uint64_t const *pu64Src;
8518 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8519 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8520 if (rcStrict == VINF_SUCCESS)
8521 {
8522 *pu64Value = *pu64Src;
8523 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8524
8525 /* Commit the new RSP value. */
8526 if (rcStrict == VINF_SUCCESS)
8527 *pTmpRsp = NewRsp;
8528 }
8529
8530 return rcStrict;
8531}
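
/*
 * Illustrative sketch (not part of the original source): the *Ex variants
 * exist so a multi-part stack operation can work on a temporary RSP and only
 * commit it once every access has succeeded.  The pushed values are
 * placeholders.  (Assumes a pVCpu in scope, as in the functions above.)
 */
#if 0 /* example only */
RTUINT64U TmpRsp;
TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, 0x1234 /* placeholder */, &TmpRsp);
if (rcStrict == VINF_SUCCESS)
    rcStrict = iemMemStackPushU16Ex(pVCpu, 0x5678 /* placeholder */, &TmpRsp);
if (rcStrict == VINF_SUCCESS)
    pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* Commit RSP only after both pushes succeeded. */
#endif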
8532
8533
8534/**
8535 * Begin a special stack push (used by interrupt, exceptions and such).
8536 *
8537 * This will raise \#SS or \#PF if appropriate.
8538 *
8539 * @returns Strict VBox status code.
8540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8541 * @param cbMem The number of bytes to push onto the stack.
8542 * @param cbAlign The alignment mask (7, 3, 1).
8543 * @param ppvMem Where to return the pointer to the stack memory.
8544 * As with the other memory functions this could be
8545 * direct access or bounce buffered access, so
8546 * don't commit any register until the commit call
8547 * succeeds.
8548 * @param puNewRsp Where to return the new RSP value. This must be
8549 * passed unchanged to
8550 * iemMemStackPushCommitSpecial().
8551 */
8552VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8553 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8554{
8555 Assert(cbMem < UINT8_MAX);
8556 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8557 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8558 IEM_ACCESS_STACK_W, cbAlign);
8559}
8560
8561
8562/**
8563 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8564 *
8565 * This will update the rSP.
8566 *
8567 * @returns Strict VBox status code.
8568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8569 * @param pvMem The pointer returned by
8570 * iemMemStackPushBeginSpecial().
8571 * @param uNewRsp The new RSP value returned by
8572 * iemMemStackPushBeginSpecial().
8573 */
8574VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8575{
8576 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8577 if (rcStrict == VINF_SUCCESS)
8578 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8579 return rcStrict;
8580}
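
/*
 * Illustrative sketch (not part of the original source): the special push API
 * is a two step protocol - map the whole frame, fill it in, then commit, which
 * also updates RSP.  The 3 dword frame layout and the uEip/uCs/uEfl values are
 * placeholders, not the actual exception dispatch code.  (Assumes a pVCpu in
 * scope, as in the functions above.)
 */
#if 0 /* example only */
uint32_t const uEip = 0, uCs = 0, uEfl = 0;        /* placeholder frame contents */
uint32_t      *pau32Frame = NULL;
uint64_t       uNewRsp    = 0;
VBOXSTRICTRC   rcStrict   = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
                                                        (void **)&pau32Frame, &uNewRsp);
if (rcStrict == VINF_SUCCESS)
{
    pau32Frame[0] = uEip;
    pau32Frame[1] = uCs;
    pau32Frame[2] = uEfl;
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp); /* Commits RSP on success. */
}
#endif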
8581
8582
8583/**
8584 * Begin a special stack pop (used by iret, retf and such).
8585 *
8586 * This will raise \#SS or \#PF if appropriate.
8587 *
8588 * @returns Strict VBox status code.
8589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8590 * @param cbMem The number of bytes to pop from the stack.
8591 * @param cbAlign The alignment mask (7, 3, 1).
8592 * @param ppvMem Where to return the pointer to the stack memory.
8593 * @param puNewRsp Where to return the new RSP value. This must be
8594 * assigned to CPUMCTX::rsp manually some time
8595 * after iemMemStackPopDoneSpecial() has been
8596 * called.
8597 */
8598VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8599 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8600{
8601 Assert(cbMem < UINT8_MAX);
8602 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8603 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8604}
8605
8606
8607/**
8608 * Continue a special stack pop (used by iret and retf), for the purpose of
8609 * retrieving a new stack pointer.
8610 *
8611 * This will raise \#SS or \#PF if appropriate.
8612 *
8613 * @returns Strict VBox status code.
8614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8615 * @param off Offset from the top of the stack. This is zero
8616 * except in the retf case.
8617 * @param cbMem The number of bytes to pop from the stack.
8618 * @param ppvMem Where to return the pointer to the stack memory.
8619 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8620 * return this because all use of this function is
8621 * to retrieve a new value and anything we return
8622 * here would be discarded.)
8623 */
8624VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8625 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8626{
8627 Assert(cbMem < UINT8_MAX);
8628
8629 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8630 RTGCPTR GCPtrTop;
8631 if (IEM_IS_64BIT_CODE(pVCpu))
8632 GCPtrTop = uCurNewRsp;
8633 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8634 GCPtrTop = (uint32_t)uCurNewRsp;
8635 else
8636 GCPtrTop = (uint16_t)uCurNewRsp;
8637
8638 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8639 0 /* checked in iemMemStackPopBeginSpecial */);
8640}
8641
8642
8643/**
8644 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8645 * iemMemStackPopContinueSpecial).
8646 *
8647 * The caller will manually commit the rSP.
8648 *
8649 * @returns Strict VBox status code.
8650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8651 * @param pvMem The pointer returned by
8652 * iemMemStackPopBeginSpecial() or
8653 * iemMemStackPopContinueSpecial().
8654 */
8655VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8656{
8657 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8658}
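
/*
 * Illustrative sketch (not part of the original source): the special pop API
 * leaves the RSP commit to the caller, which typically happens only after the
 * popped values have been validated.  The 3 dword frame is a placeholder.
 * (Assumes a pVCpu in scope, as in the functions above.)
 */
#if 0 /* example only */
uint32_t const *pau32Frame = NULL;
uint64_t        uNewRsp    = 0;
VBOXSTRICTRC    rcStrict   = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
                                                        (void const **)&pau32Frame, &uNewRsp);
if (rcStrict == VINF_SUCCESS)
{
    uint32_t const uEip = pau32Frame[0];
    uint32_t const uCs  = pau32Frame[1];
    uint32_t const uEfl = pau32Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;  /* The caller commits RSP manually. */
    RT_NOREF(uEip, uCs, uEfl);             /* (Validation of the popped values would go here.) */
}
#endif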
8659
8660
8661/**
8662 * Fetches a system table byte.
8663 *
8664 * @returns Strict VBox status code.
8665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8666 * @param pbDst Where to return the byte.
8667 * @param iSegReg The index of the segment register to use for
8668 * this access. The base and limits are checked.
8669 * @param GCPtrMem The address of the guest memory.
8670 */
8671VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8672{
8673 /* The lazy approach for now... */
8674 uint8_t const *pbSrc;
8675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8676 if (rc == VINF_SUCCESS)
8677 {
8678 *pbDst = *pbSrc;
8679 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8680 }
8681 return rc;
8682}
8683
8684
8685/**
8686 * Fetches a system table word.
8687 *
8688 * @returns Strict VBox status code.
8689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8690 * @param pu16Dst Where to return the word.
8691 * @param iSegReg The index of the segment register to use for
8692 * this access. The base and limits are checked.
8693 * @param GCPtrMem The address of the guest memory.
8694 */
8695VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8696{
8697 /* The lazy approach for now... */
8698 uint16_t const *pu16Src;
8699 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8700 if (rc == VINF_SUCCESS)
8701 {
8702 *pu16Dst = *pu16Src;
8703 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8704 }
8705 return rc;
8706}
8707
8708
8709/**
8710 * Fetches a system table dword.
8711 *
8712 * @returns Strict VBox status code.
8713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8714 * @param pu32Dst Where to return the dword.
8715 * @param iSegReg The index of the segment register to use for
8716 * this access. The base and limits are checked.
8717 * @param GCPtrMem The address of the guest memory.
8718 */
8719VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8720{
8721 /* The lazy approach for now... */
8722 uint32_t const *pu32Src;
8723 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8724 if (rc == VINF_SUCCESS)
8725 {
8726 *pu32Dst = *pu32Src;
8727 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8728 }
8729 return rc;
8730}
8731
8732
8733/**
8734 * Fetches a system table qword.
8735 *
8736 * @returns Strict VBox status code.
8737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8738 * @param pu64Dst Where to return the qword.
8739 * @param iSegReg The index of the segment register to use for
8740 * this access. The base and limits are checked.
8741 * @param GCPtrMem The address of the guest memory.
8742 */
8743VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8744{
8745 /* The lazy approach for now... */
8746 uint64_t const *pu64Src;
8747 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8748 if (rc == VINF_SUCCESS)
8749 {
8750 *pu64Dst = *pu64Src;
8751 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8752 }
8753 return rc;
8754}
8755
8756
8757/**
8758 * Fetches a descriptor table entry with caller specified error code.
8759 *
8760 * @returns Strict VBox status code.
8761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8762 * @param pDesc Where to return the descriptor table entry.
8763 * @param uSel The selector which table entry to fetch.
8764 * @param uXcpt The exception to raise on table lookup error.
8765 * @param uErrorCode The error code associated with the exception.
8766 */
8767static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8768 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8769{
8770 AssertPtr(pDesc);
8771 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8772
8773 /** @todo did the 286 require all 8 bytes to be accessible? */
8774 /*
8775 * Get the selector table base and check bounds.
8776 */
8777 RTGCPTR GCPtrBase;
8778 if (uSel & X86_SEL_LDT)
8779 {
8780 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8781 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8782 {
8783 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8784 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8785 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8786 uErrorCode, 0);
8787 }
8788
8789 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8790 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8791 }
8792 else
8793 {
8794 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8795 {
8796 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8797 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8798 uErrorCode, 0);
8799 }
8800 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8801 }
8802
8803 /*
8804 * Read the legacy descriptor and maybe the long mode extensions if
8805 * required.
8806 */
8807 VBOXSTRICTRC rcStrict;
8808 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8809 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8810 else
8811 {
8812 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8813 if (rcStrict == VINF_SUCCESS)
8814 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8815 if (rcStrict == VINF_SUCCESS)
8816 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8817 if (rcStrict == VINF_SUCCESS)
8818 pDesc->Legacy.au16[3] = 0;
8819 else
8820 return rcStrict;
8821 }
8822
8823 if (rcStrict == VINF_SUCCESS)
8824 {
8825 if ( !IEM_IS_LONG_MODE(pVCpu)
8826 || pDesc->Legacy.Gen.u1DescType)
8827 pDesc->Long.au64[1] = 0;
8828 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8829 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8830 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8831 else
8832 {
8833 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8834 /** @todo is this the right exception? */
8835 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8836 }
8837 }
8838 return rcStrict;
8839}
8840
8841
8842/**
8843 * Fetches a descriptor table entry.
8844 *
8845 * @returns Strict VBox status code.
8846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8847 * @param pDesc Where to return the descriptor table entry.
8848 * @param uSel The selector which table entry to fetch.
8849 * @param uXcpt The exception to raise on table lookup error.
8850 */
8851VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8852{
8853 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8854}
8855
8856
8857/**
8858 * Marks the selector descriptor as accessed (only non-system descriptors).
8859 *
8860 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8861 * will therefore skip the limit checks.
8862 *
8863 * @returns Strict VBox status code.
8864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8865 * @param uSel The selector.
8866 */
8867VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8868{
8869 /*
8870 * Get the selector table base and calculate the entry address.
8871 */
8872 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8873 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8874 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8875 GCPtr += uSel & X86_SEL_MASK;
8876
8877 /*
8878 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8879 * ugly stuff to avoid this. This will make sure it's an atomic access
8880 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8881 */
8882 VBOXSTRICTRC rcStrict;
8883 uint32_t volatile *pu32;
8884 if ((GCPtr & 3) == 0)
8885 {
8886 /* The normal case, map the 32 bits around the accessed bit (40). */
8887 GCPtr += 2 + 2;
8888 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8889 if (rcStrict != VINF_SUCCESS)
8890 return rcStrict;
8891 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8892 }
8893 else
8894 {
8895 /* The misaligned GDT/LDT case, map the whole thing. */
8896 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8897 if (rcStrict != VINF_SUCCESS)
8898 return rcStrict;
8899 switch ((uintptr_t)pu32 & 3)
8900 {
8901 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8902 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8903 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8904 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8905 }
8906 }
8907
8908 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8909}
8910
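/* Illustrative sketch, not compiled: spells out the invariant behind the misaligned
   switch above - the byte adjustment applied to the mapped pointer times eight plus
   the bit index handed to ASMAtomicBitSet always adds up to bit 40 (the accessed
   bit), and the adjusted pointer is dword aligned again.  The function name is a
   hypothetical helper. */
#if 0
static void iemExampleAccessedBitMath(void)
{
    for (unsigned offMisalign = 1; offMisalign <= 3; offMisalign++)
    {
        unsigned const cbAdjust = 4 - offMisalign;   /* bytes added to the mapped pointer */
        unsigned const iBit     = 40 - cbAdjust * 8; /* bit index passed to ASMAtomicBitSet */
        Assert(cbAdjust * 8 + iBit == 40);
        Assert((offMisalign + cbAdjust) % 4 == 0);   /* the adjusted pointer is aligned again */
    }
}
#endif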
8911/** @} */
8912
8913/** @name Opcode Helpers.
8914 * @{
8915 */
8916
8917/**
8918 * Calculates the effective address of a ModR/M memory operand.
8919 *
8920 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8921 *
8922 * @return Strict VBox status code.
8923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8924 * @param bRm The ModRM byte.
8925 * @param cbImmAndRspOffset - First byte: The size of any immediate
8926 * following the effective address opcode bytes
8927 * (only for RIP relative addressing).
8928 * - Second byte: RSP displacement (for POP [ESP]).
8929 * @param pGCPtrEff Where to return the effective address.
8930 */
8931VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8932{
8933 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8934# define SET_SS_DEF() \
8935 do \
8936 { \
8937 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8938 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8939 } while (0)
8940
8941 if (!IEM_IS_64BIT_CODE(pVCpu))
8942 {
8943/** @todo Check the effective address size crap! */
8944 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8945 {
8946 uint16_t u16EffAddr;
8947
8948 /* Handle the disp16 form with no registers first. */
8949 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8950 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8951 else
8952 {
8953 /* Get the displacement. */
8954 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8955 {
8956 case 0: u16EffAddr = 0; break;
8957 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8958 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8959 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8960 }
8961
8962 /* Add the base and index registers to the disp. */
8963 switch (bRm & X86_MODRM_RM_MASK)
8964 {
8965 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8966 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8967 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8968 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8969 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8970 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8971 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8972 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8973 }
8974 }
8975
8976 *pGCPtrEff = u16EffAddr;
8977 }
8978 else
8979 {
8980 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8981 uint32_t u32EffAddr;
8982
8983 /* Handle the disp32 form with no registers first. */
8984 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8985 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8986 else
8987 {
8988 /* Get the register (or SIB) value. */
8989 switch ((bRm & X86_MODRM_RM_MASK))
8990 {
8991 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8992 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8993 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8994 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8995 case 4: /* SIB */
8996 {
8997 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8998
8999 /* Get the index and scale it. */
9000 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9001 {
9002 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9003 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9004 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9005 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9006 case 4: u32EffAddr = 0; /*none */ break;
9007 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9008 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9009 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9010 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9011 }
9012 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9013
9014 /* add base */
9015 switch (bSib & X86_SIB_BASE_MASK)
9016 {
9017 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9018 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9019 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9020 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9021 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9022 case 5:
9023 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9024 {
9025 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9026 SET_SS_DEF();
9027 }
9028 else
9029 {
9030 uint32_t u32Disp;
9031 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9032 u32EffAddr += u32Disp;
9033 }
9034 break;
9035 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9036 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9037 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9038 }
9039 break;
9040 }
9041 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9042 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9043 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9045 }
9046
9047 /* Get and add the displacement. */
9048 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9049 {
9050 case 0:
9051 break;
9052 case 1:
9053 {
9054 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9055 u32EffAddr += i8Disp;
9056 break;
9057 }
9058 case 2:
9059 {
9060 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9061 u32EffAddr += u32Disp;
9062 break;
9063 }
9064 default:
9065 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9066 }
9067
9068 }
9069 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9070 *pGCPtrEff = u32EffAddr;
9071 else
9072 {
9073 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9074 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9075 }
9076 }
9077 }
9078 else
9079 {
9080 uint64_t u64EffAddr;
9081
9082 /* Handle the rip+disp32 form with no registers first. */
9083 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9084 {
9085 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9086 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9087 }
9088 else
9089 {
9090 /* Get the register (or SIB) value. */
9091 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9092 {
9093 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9094 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9095 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9096 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9097 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9098 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9099 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9100 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9101 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9102 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9103 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9104 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9105 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9106 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9107 /* SIB */
9108 case 4:
9109 case 12:
9110 {
9111 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9112
9113 /* Get the index and scale it. */
9114 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9115 {
9116 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9117 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9118 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9119 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9120 case 4: u64EffAddr = 0; /*none */ break;
9121 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9122 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9123 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9124 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9125 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9126 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9127 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9128 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9129 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9130 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9131 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9132 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9133 }
9134 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9135
9136 /* add base */
9137 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9138 {
9139 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9140 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9141 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9142 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9143 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9144 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9145 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9146 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9147 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9148 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9149 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9150 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9151 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9152 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9153 /* complicated encodings */
9154 case 5:
9155 case 13:
9156 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9157 {
9158 if (!pVCpu->iem.s.uRexB)
9159 {
9160 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9161 SET_SS_DEF();
9162 }
9163 else
9164 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9165 }
9166 else
9167 {
9168 uint32_t u32Disp;
9169 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9170 u64EffAddr += (int32_t)u32Disp;
9171 }
9172 break;
9173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9174 }
9175 break;
9176 }
9177 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9178 }
9179
9180 /* Get and add the displacement. */
9181 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9182 {
9183 case 0:
9184 break;
9185 case 1:
9186 {
9187 int8_t i8Disp;
9188 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9189 u64EffAddr += i8Disp;
9190 break;
9191 }
9192 case 2:
9193 {
9194 uint32_t u32Disp;
9195 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9196 u64EffAddr += (int32_t)u32Disp;
9197 break;
9198 }
9199 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9200 }
9201
9202 }
9203
9204 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9205 *pGCPtrEff = u64EffAddr;
9206 else
9207 {
9208 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9209 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9210 }
9211 }
9212
9213 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9214 return VINF_SUCCESS;
9215}
9216
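/* Illustrative sketch, not compiled: a worked 16-bit example for the decoder above.
   A hypothetical ModR/M byte of 0x46 has mod=01 and rm=110, i.e. [bp+disp8], so the
   effective address is BP plus the sign-extended disp8 and SS becomes the default
   segment (SET_SS_DEF).  The function name is a hypothetical helper. */
#if 0
static void iemExampleModRm16(void)
{
    uint8_t const bRm = 0x46; /* mod=01, reg=000, rm=110 */
    Assert(((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) == 1); /* one disp8 byte follows */
    Assert((bRm & X86_MODRM_RM_MASK) == 6);                            /* bp + disp8, SS default */
}
#endif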
9217
9218#ifdef IEM_WITH_SETJMP
9219/**
9220 * Calculates the effective address of a ModR/M memory operand.
9221 *
9222 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9223 *
9224 * May longjmp on internal error.
9225 *
9226 * @return The effective address.
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param bRm The ModRM byte.
9229 * @param cbImmAndRspOffset - First byte: The size of any immediate
9230 * following the effective address opcode bytes
9231 * (only for RIP relative addressing).
9232 * - Second byte: RSP displacement (for POP [ESP]).
9233 */
9234RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9235{
9236 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9237# define SET_SS_DEF() \
9238 do \
9239 { \
9240 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9241 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9242 } while (0)
9243
9244 if (!IEM_IS_64BIT_CODE(pVCpu))
9245 {
9246/** @todo Check the effective address size crap! */
9247 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9248 {
9249 uint16_t u16EffAddr;
9250
9251 /* Handle the disp16 form with no registers first. */
9252 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9253 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9254 else
9255 {
9256 /* Get the displacement. */
9257 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9258 {
9259 case 0: u16EffAddr = 0; break;
9260 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9261 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9262 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9263 }
9264
9265 /* Add the base and index registers to the disp. */
9266 switch (bRm & X86_MODRM_RM_MASK)
9267 {
9268 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9269 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9270 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9271 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9272 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9273 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9274 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9275 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9276 }
9277 }
9278
9279 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9280 return u16EffAddr;
9281 }
9282
9283 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9284 uint32_t u32EffAddr;
9285
9286 /* Handle the disp32 form with no registers first. */
9287 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9288 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9289 else
9290 {
9291 /* Get the register (or SIB) value. */
9292 switch ((bRm & X86_MODRM_RM_MASK))
9293 {
9294 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9295 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9296 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9297 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9298 case 4: /* SIB */
9299 {
9300 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9301
9302 /* Get the index and scale it. */
9303 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9304 {
9305 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9306 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9307 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9308 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9309 case 4: u32EffAddr = 0; /*none */ break;
9310 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9311 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9312 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9313 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9314 }
9315 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9316
9317 /* add base */
9318 switch (bSib & X86_SIB_BASE_MASK)
9319 {
9320 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9321 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9322 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9323 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9324 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9325 case 5:
9326 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9327 {
9328 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9329 SET_SS_DEF();
9330 }
9331 else
9332 {
9333 uint32_t u32Disp;
9334 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9335 u32EffAddr += u32Disp;
9336 }
9337 break;
9338 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9339 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9340 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9341 }
9342 break;
9343 }
9344 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9345 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9346 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9347 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9348 }
9349
9350 /* Get and add the displacement. */
9351 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9352 {
9353 case 0:
9354 break;
9355 case 1:
9356 {
9357 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9358 u32EffAddr += i8Disp;
9359 break;
9360 }
9361 case 2:
9362 {
9363 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9364 u32EffAddr += u32Disp;
9365 break;
9366 }
9367 default:
9368 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9369 }
9370 }
9371
9372 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9373 {
9374 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9375 return u32EffAddr;
9376 }
9377 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9378 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9379 return u32EffAddr & UINT16_MAX;
9380 }
9381
9382 uint64_t u64EffAddr;
9383
9384 /* Handle the rip+disp32 form with no registers first. */
9385 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9386 {
9387 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9388 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9389 }
9390 else
9391 {
9392 /* Get the register (or SIB) value. */
9393 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9394 {
9395 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9396 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9397 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9398 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9399 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9400 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9401 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9402 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9403 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9404 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9405 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9406 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9407 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9408 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9409 /* SIB */
9410 case 4:
9411 case 12:
9412 {
9413 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9414
9415 /* Get the index and scale it. */
9416 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9417 {
9418 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9419 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9420 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9421 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9422 case 4: u64EffAddr = 0; /*none */ break;
9423 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9424 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9425 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9426 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9427 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9428 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9429 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9430 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9431 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9432 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9433 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9434 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9435 }
9436 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9437
9438 /* add base */
9439 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9440 {
9441 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9442 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9443 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9444 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9445 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9446 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9447 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9448 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9449 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9450 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9451 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9452 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9453 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9454 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9455 /* complicated encodings */
9456 case 5:
9457 case 13:
9458 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9459 {
9460 if (!pVCpu->iem.s.uRexB)
9461 {
9462 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9463 SET_SS_DEF();
9464 }
9465 else
9466 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9467 }
9468 else
9469 {
9470 uint32_t u32Disp;
9471 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9472 u64EffAddr += (int32_t)u32Disp;
9473 }
9474 break;
9475 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9476 }
9477 break;
9478 }
9479 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9480 }
9481
9482 /* Get and add the displacement. */
9483 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9484 {
9485 case 0:
9486 break;
9487 case 1:
9488 {
9489 int8_t i8Disp;
9490 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9491 u64EffAddr += i8Disp;
9492 break;
9493 }
9494 case 2:
9495 {
9496 uint32_t u32Disp;
9497 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9498 u64EffAddr += (int32_t)u32Disp;
9499 break;
9500 }
9501 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9502 }
9503
9504 }
9505
9506 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9507 {
9508 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9509 return u64EffAddr;
9510 }
9511 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9512 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9513 return u64EffAddr & UINT32_MAX;
9514}
9515#endif /* IEM_WITH_SETJMP */
9516
9517
9518/**
9519 * Calculates the effective address of a ModR/M memory operand, extended version
9520 * for use in the recompilers.
9521 *
9522 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9523 *
9524 * @return Strict VBox status code.
9525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9526 * @param bRm The ModRM byte.
9527 * @param cbImmAndRspOffset - First byte: The size of any immediate
9528 * following the effective address opcode bytes
9529 * (only for RIP relative addressing).
9530 * - Second byte: RSP displacement (for POP [ESP]).
9531 * @param pGCPtrEff Where to return the effective address.
9532 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9533 * SIB byte (bits 39:32).
9534 */
9535VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9536{
9537 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9538# define SET_SS_DEF() \
9539 do \
9540 { \
9541 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9542 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9543 } while (0)
9544
9545 uint64_t uInfo;
9546 if (!IEM_IS_64BIT_CODE(pVCpu))
9547 {
9548/** @todo Check the effective address size crap! */
9549 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9550 {
9551 uint16_t u16EffAddr;
9552
9553 /* Handle the disp16 form with no registers first. */
9554 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9555 {
9556 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9557 uInfo = u16EffAddr;
9558 }
9559 else
9560 {
9561 /* Get the displacement. */
9562 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9563 {
9564 case 0: u16EffAddr = 0; break;
9565 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9566 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9567 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9568 }
9569 uInfo = u16EffAddr;
9570
9571 /* Add the base and index registers to the disp. */
9572 switch (bRm & X86_MODRM_RM_MASK)
9573 {
9574 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9575 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9576 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9577 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9578 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9579 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9580 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9581 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9582 }
9583 }
9584
9585 *pGCPtrEff = u16EffAddr;
9586 }
9587 else
9588 {
9589 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9590 uint32_t u32EffAddr;
9591
9592 /* Handle the disp32 form with no registers first. */
9593 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9594 {
9595 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9596 uInfo = u32EffAddr;
9597 }
9598 else
9599 {
9600 /* Get the register (or SIB) value. */
9601 uInfo = 0;
9602 switch ((bRm & X86_MODRM_RM_MASK))
9603 {
9604 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9605 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9606 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9607 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9608 case 4: /* SIB */
9609 {
9610 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9611 uInfo = (uint64_t)bSib << 32;
9612
9613 /* Get the index and scale it. */
9614 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9615 {
9616 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9617 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9618 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9619 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9620 case 4: u32EffAddr = 0; /*none */ break;
9621 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9622 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9623 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9624 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9625 }
9626 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9627
9628 /* add base */
9629 switch (bSib & X86_SIB_BASE_MASK)
9630 {
9631 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9632 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9633 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9634 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9635 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9636 case 5:
9637 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9638 {
9639 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9640 SET_SS_DEF();
9641 }
9642 else
9643 {
9644 uint32_t u32Disp;
9645 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9646 u32EffAddr += u32Disp;
9647 uInfo |= u32Disp;
9648 }
9649 break;
9650 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9651 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9652 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9653 }
9654 break;
9655 }
9656 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9657 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9658 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9659 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9660 }
9661
9662 /* Get and add the displacement. */
9663 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9664 {
9665 case 0:
9666 break;
9667 case 1:
9668 {
9669 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9670 u32EffAddr += i8Disp;
9671 uInfo |= (uint32_t)(int32_t)i8Disp;
9672 break;
9673 }
9674 case 2:
9675 {
9676 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9677 u32EffAddr += u32Disp;
9678 uInfo |= (uint32_t)u32Disp;
9679 break;
9680 }
9681 default:
9682 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9683 }
9684
9685 }
9686 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9687 *pGCPtrEff = u32EffAddr;
9688 else
9689 {
9690 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9691 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9692 }
9693 }
9694 }
9695 else
9696 {
9697 uint64_t u64EffAddr;
9698
9699 /* Handle the rip+disp32 form with no registers first. */
9700 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9701 {
9702 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9703 uInfo = (uint32_t)u64EffAddr;
9704 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9705 }
9706 else
9707 {
9708 /* Get the register (or SIB) value. */
9709 uInfo = 0;
9710 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9711 {
9712 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9713 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9714 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9715 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9716 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9717 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9718 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9719 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9720 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9721 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9722 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9723 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9724 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9725 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9726 /* SIB */
9727 case 4:
9728 case 12:
9729 {
9730 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9731 uInfo = (uint64_t)bSib << 32;
9732
9733 /* Get the index and scale it. */
9734 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9735 {
9736 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9737 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9738 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9739 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9740 case 4: u64EffAddr = 0; /*none */ break;
9741 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9742 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9743 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9744 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9745 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9746 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9747 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9748 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9749 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9750 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9751 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9752 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9753 }
9754 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9755
9756 /* add base */
9757 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9758 {
9759 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9760 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9761 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9762 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9763 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9764 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9765 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9766 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9767 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9768 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9769 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9770 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9771 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9772 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9773 /* complicated encodings */
9774 case 5:
9775 case 13:
9776 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9777 {
9778 if (!pVCpu->iem.s.uRexB)
9779 {
9780 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9781 SET_SS_DEF();
9782 }
9783 else
9784 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9785 }
9786 else
9787 {
9788 uint32_t u32Disp;
9789 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9790 u64EffAddr += (int32_t)u32Disp;
9791 uInfo |= u32Disp;
9792 }
9793 break;
9794 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9795 }
9796 break;
9797 }
9798 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9799 }
9800
9801 /* Get and add the displacement. */
9802 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9803 {
9804 case 0:
9805 break;
9806 case 1:
9807 {
9808 int8_t i8Disp;
9809 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9810 u64EffAddr += i8Disp;
9811 uInfo |= (uint32_t)(int32_t)i8Disp;
9812 break;
9813 }
9814 case 2:
9815 {
9816 uint32_t u32Disp;
9817 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9818 u64EffAddr += (int32_t)u32Disp;
9819 uInfo |= u32Disp;
9820 break;
9821 }
9822 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9823 }
9824
9825 }
9826
9827 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9828 *pGCPtrEff = u64EffAddr;
9829 else
9830 {
9831 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9832 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9833 }
9834 }
9835 *puInfo = uInfo;
9836
9837 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9838 return VINF_SUCCESS;
9839}
9840
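/* Illustrative sketch, not compiled: shows the layout of the uInfo value returned by
   iemOpHlpCalcRmEffAddrEx above - the 32-bit displacement sits in bits 31:0 and the
   SIB byte, when present, in bits 39:32.  The function name and values are
   hypothetical. */
#if 0
static void iemExampleEffAddrExInfo(void)
{
    uint8_t  const bSib    = 0x24;
    uint32_t const u32Disp = UINT32_C(0x00401000);
    uint64_t const uInfo   = ((uint64_t)bSib << 32) | u32Disp;
    Assert((uint32_t)uInfo        == u32Disp); /* bits 31:0  - displacement */
    Assert((uint8_t)(uInfo >> 32) == bSib);    /* bits 39:32 - SIB byte */
}
#endif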
9841/** @} */
9842
9843
9844#ifdef LOG_ENABLED
9845/**
9846 * Logs the current instruction.
9847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9848 * @param fSameCtx Set if we have the same context information as the VMM,
9849 * clear if we may have already executed an instruction in
9850 * our debug context. When clear, we assume IEMCPU holds
9851 * valid CPU mode info.
9852 *
9853 * The @a fSameCtx parameter is now misleading and obsolete.
9854 * @param pszFunction The IEM function doing the execution.
9855 */
9856static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9857{
9858# ifdef IN_RING3
9859 if (LogIs2Enabled())
9860 {
9861 char szInstr[256];
9862 uint32_t cbInstr = 0;
9863 if (fSameCtx)
9864 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9865 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9866 szInstr, sizeof(szInstr), &cbInstr);
9867 else
9868 {
9869 uint32_t fFlags = 0;
9870 switch (IEM_GET_CPU_MODE(pVCpu))
9871 {
9872 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9873 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9874 case IEMMODE_16BIT:
9875 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9876 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9877 else
9878 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9879 break;
9880 }
9881 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9882 szInstr, sizeof(szInstr), &cbInstr);
9883 }
9884
9885 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9886 Log2(("**** %s fExec=%x\n"
9887 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9888 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9889 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9890 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9891 " %s\n"
9892 , pszFunction, pVCpu->iem.s.fExec,
9893 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9894 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9895 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9896 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9897 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9898 szInstr));
9899
9900 if (LogIs3Enabled())
9901 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9902 }
9903 else
9904# endif
9905 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9906 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9907 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9908}
9909#endif /* LOG_ENABLED */
9910
9911
9912#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9913/**
9914 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9915 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9916 *
9917 * @returns Modified rcStrict.
9918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9919 * @param rcStrict The instruction execution status.
9920 */
9921static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9922{
9923 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9924 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9925 {
9926 /* VMX preemption timer takes priority over NMI-window exits. */
9927 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9928 {
9929 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9930 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9931 }
9932 /*
9933 * Check remaining intercepts.
9934 *
9935 * NMI-window and Interrupt-window VM-exits.
9936 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9937 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9938 *
9939 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9940 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9941 */
9942 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9943 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9944 && !TRPMHasTrap(pVCpu))
9945 {
9946 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9947 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9948 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9949 {
9950 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9951 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9952 }
9953 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9954 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9955 {
9956 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9957 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9958 }
9959 }
9960 }
9961 /* TPR-below threshold/APIC write has the highest priority. */
9962 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9963 {
9964 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9965 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9966 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9967 }
9968 /* MTF takes priority over VMX-preemption timer. */
9969 else
9970 {
9971 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9972 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9973 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9974 }
9975 return rcStrict;
9976}
9977#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9978
9979
9980/**
9981 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9982 * IEMExecOneWithPrefetchedByPC.
9983 *
9984 * Similar code is found in IEMExecLots.
9985 *
9986 * @return Strict VBox status code.
9987 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9988 * @param fExecuteInhibit If set, execute the instruction following CLI,
9989 * POP SS and MOV SS,GR.
9990 * @param pszFunction The calling function name.
9991 */
9992DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9993{
9994 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9995 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9996 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9997 RT_NOREF_PV(pszFunction);
9998
9999#ifdef IEM_WITH_SETJMP
10000 VBOXSTRICTRC rcStrict;
10001 IEM_TRY_SETJMP(pVCpu, rcStrict)
10002 {
10003 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10004 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10005 }
10006 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10007 {
10008 pVCpu->iem.s.cLongJumps++;
10009 }
10010 IEM_CATCH_LONGJMP_END(pVCpu);
10011#else
10012 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10013 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10014#endif
10015 if (rcStrict == VINF_SUCCESS)
10016 pVCpu->iem.s.cInstructions++;
10017 if (pVCpu->iem.s.cActiveMappings > 0)
10018 {
10019 Assert(rcStrict != VINF_SUCCESS);
10020 iemMemRollback(pVCpu);
10021 }
10022 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10023 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10024 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10025
10026//#ifdef DEBUG
10027// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
10028//#endif
10029
10030#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10031 /*
10032 * Perform any VMX nested-guest instruction boundary actions.
10033 *
10034 * If any of these causes a VM-exit, we must skip executing the next
10035 * instruction (would run into stale page tables). A VM-exit makes sure
10036 * there is no interrupt-inhibition, so that should ensure we don't go
10037 * to try execute the next instruction. Clearing fExecuteInhibit is
10038 * problematic because of the setjmp/longjmp clobbering above.
10039 */
10040 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10041 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
10042 || rcStrict != VINF_SUCCESS)
10043 { /* likely */ }
10044 else
10045 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10046#endif
10047
10048 /* Execute the next instruction as well if a cli, pop ss or
10049 mov ss, Gr has just completed successfully. */
10050 if ( fExecuteInhibit
10051 && rcStrict == VINF_SUCCESS
10052 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10053 {
10054 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10055 if (rcStrict == VINF_SUCCESS)
10056 {
10057#ifdef LOG_ENABLED
10058 iemLogCurInstr(pVCpu, false, pszFunction);
10059#endif
10060#ifdef IEM_WITH_SETJMP
10061 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10062 {
10063 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10064 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10065 }
10066 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10067 {
10068 pVCpu->iem.s.cLongJumps++;
10069 }
10070 IEM_CATCH_LONGJMP_END(pVCpu);
10071#else
10072 IEM_OPCODE_GET_FIRST_U8(&b);
10073 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10074#endif
10075 if (rcStrict == VINF_SUCCESS)
10076 {
10077 pVCpu->iem.s.cInstructions++;
10078#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10079 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10080 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10081 { /* likely */ }
10082 else
10083 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10084#endif
10085 }
10086 if (pVCpu->iem.s.cActiveMappings > 0)
10087 {
10088 Assert(rcStrict != VINF_SUCCESS);
10089 iemMemRollback(pVCpu);
10090 }
10091 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10092 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10093 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10094 }
10095 else if (pVCpu->iem.s.cActiveMappings > 0)
10096 iemMemRollback(pVCpu);
10097 /** @todo drop this after we bake this change into RIP advancing. */
10098 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10099 }
10100
10101 /*
10102 * Return value fiddling, statistics and sanity assertions.
10103 */
10104 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10105
10106 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10107 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10108 return rcStrict;
10109}
10110
10111
10112/**
10113 * Execute one instruction.
10114 *
10115 * @return Strict VBox status code.
10116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10117 */
10118VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10119{
10120 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10121#ifdef LOG_ENABLED
10122 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10123#endif
10124
10125 /*
10126 * Do the decoding and emulation.
10127 */
10128 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10129 if (rcStrict == VINF_SUCCESS)
10130 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10131 else if (pVCpu->iem.s.cActiveMappings > 0)
10132 iemMemRollback(pVCpu);
10133
10134 if (rcStrict != VINF_SUCCESS)
10135 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10136 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10137 return rcStrict;
10138}
10139
10140
10141VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10142{
10143 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10144 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10145 if (rcStrict == VINF_SUCCESS)
10146 {
10147 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10148 if (pcbWritten)
10149 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10150 }
10151 else if (pVCpu->iem.s.cActiveMappings > 0)
10152 iemMemRollback(pVCpu);
10153
10154 return rcStrict;
10155}
10156
10157
10158VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10159 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10160{
10161 VBOXSTRICTRC rcStrict;
10162 if ( cbOpcodeBytes
10163 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10164 {
10165 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10166#ifdef IEM_WITH_CODE_TLB
10167 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10168 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10169 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10170 pVCpu->iem.s.offCurInstrStart = 0;
10171 pVCpu->iem.s.offInstrNextByte = 0;
10172 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10173#else
10174 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10175 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10176#endif
10177 rcStrict = VINF_SUCCESS;
10178 }
10179 else
10180 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10181 if (rcStrict == VINF_SUCCESS)
10182 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10183 else if (pVCpu->iem.s.cActiveMappings > 0)
10184 iemMemRollback(pVCpu);
10185
10186 return rcStrict;
10187}
10188
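/* Illustrative sketch, not compiled: a caller that already holds the instruction
   bytes for the current RIP can hand them to IEMExecOneWithPrefetchedByPC and skip
   the opcode refetch; if the bytes do not match the current RIP the function falls
   back to normal prefetching.  The wrapper name and parameters are hypothetical. */
#if 0
static VBOXSTRICTRC iemExampleExecPrefetched(PVMCPUCC pVCpu, const uint8_t *pabInstr, size_t cbInstr)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, pabInstr, cbInstr);
}
#endif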
10189
10190VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10191{
10192 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10193 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10194 if (rcStrict == VINF_SUCCESS)
10195 {
10196 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10197 if (pcbWritten)
10198 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10199 }
10200 else if (pVCpu->iem.s.cActiveMappings > 0)
10201 iemMemRollback(pVCpu);
10202
10203 return rcStrict;
10204}
10205
10206
10207VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10208 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10209{
10210 VBOXSTRICTRC rcStrict;
10211 if ( cbOpcodeBytes
10212 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10213 {
10214 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10215#ifdef IEM_WITH_CODE_TLB
10216 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10217 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10218 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10219 pVCpu->iem.s.offCurInstrStart = 0;
10220 pVCpu->iem.s.offInstrNextByte = 0;
10221 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10222#else
10223 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10224 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10225#endif
10226 rcStrict = VINF_SUCCESS;
10227 }
10228 else
10229 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10230 if (rcStrict == VINF_SUCCESS)
10231 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10232 else if (pVCpu->iem.s.cActiveMappings > 0)
10233 iemMemRollback(pVCpu);
10234
10235 return rcStrict;
10236}
10237
10238
10239/**
10240 * For handling split cacheline lock operations when the host has split-lock
10241 * detection enabled.
10242 *
10243 * This will cause the interpreter to disregard the lock prefix and implicit
10244 * locking (xchg).
10245 *
10246 * @returns Strict VBox status code.
10247 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10248 */
10249VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10250{
10251 /*
10252 * Do the decoding and emulation.
10253 */
10254 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10255 if (rcStrict == VINF_SUCCESS)
10256 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10257 else if (pVCpu->iem.s.cActiveMappings > 0)
10258 iemMemRollback(pVCpu);
10259
10260 if (rcStrict != VINF_SUCCESS)
10261 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10262 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10263 return rcStrict;
10264}
10265
10266
10267/**
10268 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10269 * inject a pending TRPM trap.
10270 */
10271VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10272{
10273 Assert(TRPMHasTrap(pVCpu));
10274
10275 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10276 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10277 {
10278 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10279#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10280 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10281 if (fIntrEnabled)
10282 {
10283 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10284 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10285 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10286 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10287 else
10288 {
10289 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10290 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10291 }
10292 }
10293#else
10294 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10295#endif
10296 if (fIntrEnabled)
10297 {
10298 uint8_t u8TrapNo;
10299 TRPMEVENT enmType;
10300 uint32_t uErrCode;
10301 RTGCPTR uCr2;
10302 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10303 AssertRC(rc2);
10304 Assert(enmType == TRPM_HARDWARE_INT);
10305 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10306
10307 TRPMResetTrap(pVCpu);
10308
10309#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10310 /* Injecting an event may cause a VM-exit. */
10311 if ( rcStrict != VINF_SUCCESS
10312 && rcStrict != VINF_IEM_RAISED_XCPT)
10313 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10314#else
10315 NOREF(rcStrict);
10316#endif
10317 }
10318 }
10319
10320 return VINF_SUCCESS;
10321}
10322
10323
10324VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10325{
10326 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10327 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10328 Assert(cMaxInstructions > 0);
10329
10330 /*
10331 * See if there is an interrupt pending in TRPM, inject it if we can.
10332 */
10333 /** @todo What if we are injecting an exception and not an interrupt? Is that
10334 * possible here? For now we assert it is indeed only an interrupt. */
10335 if (!TRPMHasTrap(pVCpu))
10336 { /* likely */ }
10337 else
10338 {
10339 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10340 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10341 { /* likely */ }
10342 else
10343 return rcStrict;
10344 }
10345
10346 /*
10347 * Initial decoder init w/ prefetch, then setup setjmp.
10348 */
10349 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10350 if (rcStrict == VINF_SUCCESS)
10351 {
10352#ifdef IEM_WITH_SETJMP
10353 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10354 IEM_TRY_SETJMP(pVCpu, rcStrict)
10355#endif
10356 {
10357 /*
10358 * The run loop. We limit ourselves to the caller specified number of instructions (cMaxInstructions).
10359 */
10360 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10361 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10362 for (;;)
10363 {
10364 /*
10365 * Log the state.
10366 */
10367#ifdef LOG_ENABLED
10368 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10369#endif
10370
10371 /*
10372 * Do the decoding and emulation.
10373 */
10374 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10375 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10376#ifdef VBOX_STRICT
10377 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10378#endif
10379 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10380 {
10381 Assert(pVCpu->iem.s.cActiveMappings == 0);
10382 pVCpu->iem.s.cInstructions++;
10383
10384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10385 /* Perform any VMX nested-guest instruction boundary actions. */
10386 uint64_t fCpu = pVCpu->fLocalForcedActions;
10387 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10388 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10389 { /* likely */ }
10390 else
10391 {
10392 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10393 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10394 fCpu = pVCpu->fLocalForcedActions;
10395 else
10396 {
10397 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10398 break;
10399 }
10400 }
10401#endif
10402 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10403 {
10404#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10405 uint64_t fCpu = pVCpu->fLocalForcedActions;
10406#endif
10407 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10408 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10409 | VMCPU_FF_TLB_FLUSH
10410 | VMCPU_FF_UNHALT );
10411
10412 if (RT_LIKELY( ( !fCpu
10413 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10414 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10415 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10416 {
10417 if (--cMaxInstructionsGccStupidity > 0)
10418 {
10419                        /* Poll timers every now and then according to the caller's specs. */
10420 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10421 || !TMTimerPollBool(pVM, pVCpu))
10422 {
10423 Assert(pVCpu->iem.s.cActiveMappings == 0);
10424 iemReInitDecoder(pVCpu);
10425 continue;
10426 }
10427 }
10428 }
10429 }
10430 Assert(pVCpu->iem.s.cActiveMappings == 0);
10431 }
10432 else if (pVCpu->iem.s.cActiveMappings > 0)
10433 iemMemRollback(pVCpu);
10434 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10435 break;
10436 }
10437 }
10438#ifdef IEM_WITH_SETJMP
10439 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10440 {
10441 if (pVCpu->iem.s.cActiveMappings > 0)
10442 iemMemRollback(pVCpu);
10443# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10444 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10445# endif
10446 pVCpu->iem.s.cLongJumps++;
10447 }
10448 IEM_CATCH_LONGJMP_END(pVCpu);
10449#endif
10450
10451 /*
10452 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10453 */
10454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10455 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10456 }
10457 else
10458 {
10459 if (pVCpu->iem.s.cActiveMappings > 0)
10460 iemMemRollback(pVCpu);
10461
10462#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10463 /*
10464 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10465 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10466 */
10467 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10468#endif
10469 }
10470
10471 /*
10472 * Maybe re-enter raw-mode and log.
10473 */
10474 if (rcStrict != VINF_SUCCESS)
10475 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10476 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10477 if (pcInstructions)
10478 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10479 return rcStrict;
10480}
10481
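/** @par Usage sketch (illustrative only, not part of the original source)
 * A minimal, hypothetical caller of IEMExecLots as it might look in an EM
 * execution loop (pVCpu assumed to be in scope).  The instruction budget and
 * poll rate below are example values; note that the assertion at the top of
 * the function requires cPollRate + 1 to be a power of two, i.e. the poll
 * rate acts as a mask.
 * @code
 *    uint32_t     cInstructions = 0;
 *    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
 *                                        511 /*cPollRate: 2^n - 1*/, &cInstructions);
 *    if (rcStrict != VINF_SUCCESS)
 *        Log(("IEMExecLots stopped after %u instructions: %Rrc\n",
 *             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */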
10482
10483/**
10484 * Interface used by EMExecuteExec; executes guest instructions while gathering exit statistics and enforcing the given limits.
10485 *
10486 * @returns Strict VBox status code.
10487 * @param pVCpu The cross context virtual CPU structure.
10488 * @param fWillExit To be defined.
10489 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10490 * @param cMaxInstructions Maximum number of instructions to execute.
10491 * @param cMaxInstructionsWithoutExits
10492 * The max number of instructions without exits.
10493 * @param pStats Where to return statistics.
10494 */
10495VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10496 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10497{
10498 NOREF(fWillExit); /** @todo define flexible exit crits */
10499
10500 /*
10501 * Initialize return stats.
10502 */
10503 pStats->cInstructions = 0;
10504 pStats->cExits = 0;
10505 pStats->cMaxExitDistance = 0;
10506 pStats->cReserved = 0;
10507
10508 /*
10509 * Initial decoder init w/ prefetch, then setup setjmp.
10510 */
10511 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10512 if (rcStrict == VINF_SUCCESS)
10513 {
10514#ifdef IEM_WITH_SETJMP
10515 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10516 IEM_TRY_SETJMP(pVCpu, rcStrict)
10517#endif
10518 {
10519#ifdef IN_RING0
10520 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10521#endif
10522 uint32_t cInstructionSinceLastExit = 0;
10523
10524 /*
10525              * The run loop. We limit ourselves to the caller-specified instruction limits.
10526 */
10527 PVM pVM = pVCpu->CTX_SUFF(pVM);
10528 for (;;)
10529 {
10530 /*
10531 * Log the state.
10532 */
10533#ifdef LOG_ENABLED
10534 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10535#endif
10536
10537 /*
10538 * Do the decoding and emulation.
10539 */
10540 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10541
10542 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10543 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10544
10545 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10546 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10547 {
10548 pStats->cExits += 1;
10549 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10550 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10551 cInstructionSinceLastExit = 0;
10552 }
10553
10554 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10555 {
10556 Assert(pVCpu->iem.s.cActiveMappings == 0);
10557 pVCpu->iem.s.cInstructions++;
10558 pStats->cInstructions++;
10559 cInstructionSinceLastExit++;
10560
10561#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10562 /* Perform any VMX nested-guest instruction boundary actions. */
10563 uint64_t fCpu = pVCpu->fLocalForcedActions;
10564 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10565 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10566 { /* likely */ }
10567 else
10568 {
10569 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10570 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10571 fCpu = pVCpu->fLocalForcedActions;
10572 else
10573 {
10574 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10575 break;
10576 }
10577 }
10578#endif
10579 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10580 {
10581#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10582 uint64_t fCpu = pVCpu->fLocalForcedActions;
10583#endif
10584 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10585 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10586 | VMCPU_FF_TLB_FLUSH
10587 | VMCPU_FF_UNHALT );
10588 if (RT_LIKELY( ( ( !fCpu
10589 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10590 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10591 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10592 || pStats->cInstructions < cMinInstructions))
10593 {
10594 if (pStats->cInstructions < cMaxInstructions)
10595 {
10596 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10597 {
10598#ifdef IN_RING0
10599 if ( !fCheckPreemptionPending
10600 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10601#endif
10602 {
10603 Assert(pVCpu->iem.s.cActiveMappings == 0);
10604 iemReInitDecoder(pVCpu);
10605 continue;
10606 }
10607#ifdef IN_RING0
10608 rcStrict = VINF_EM_RAW_INTERRUPT;
10609 break;
10610#endif
10611 }
10612 }
10613 }
10614 Assert(!(fCpu & VMCPU_FF_IEM));
10615 }
10616 Assert(pVCpu->iem.s.cActiveMappings == 0);
10617 }
10618 else if (pVCpu->iem.s.cActiveMappings > 0)
10619 iemMemRollback(pVCpu);
10620 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10621 break;
10622 }
10623 }
10624#ifdef IEM_WITH_SETJMP
10625 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10626 {
10627 if (pVCpu->iem.s.cActiveMappings > 0)
10628 iemMemRollback(pVCpu);
10629 pVCpu->iem.s.cLongJumps++;
10630 }
10631 IEM_CATCH_LONGJMP_END(pVCpu);
10632#endif
10633
10634 /*
10635 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10636 */
10637 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10638 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10639 }
10640 else
10641 {
10642 if (pVCpu->iem.s.cActiveMappings > 0)
10643 iemMemRollback(pVCpu);
10644
10645#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10646 /*
10647 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10648 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10649 */
10650 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10651#endif
10652 }
10653
10654 /*
10655 * Maybe re-enter raw-mode and log.
10656 */
10657 if (rcStrict != VINF_SUCCESS)
10658 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10659 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10660 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10661 return rcStrict;
10662}
10663
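/** @par Usage sketch (illustrative only, not part of the original source)
 * A hypothetical caller of IEMExecForExits; the limits are made-up example
 * values and fWillExit is passed as zero since its flag set is still to be
 * defined (see the @todo in the function).
 * @code
 *    IEMEXECFOREXITSTATS Stats;
 *    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/,
 *                                            32 /*cMinInstructions*/,
 *                                            4096 /*cMaxInstructions*/,
 *                                            512 /*cMaxInstructionsWithoutExits*/,
 *                                            &Stats);
 *    Log2(("ins=%u exits=%u maxdist=%u rc=%Rrc\n", Stats.cInstructions, Stats.cExits,
 *          Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */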
10664
10665/**
10666 * Injects a trap, fault, abort, software interrupt or external interrupt.
10667 *
10668 * The parameter list matches TRPMQueryTrapAll pretty closely.
10669 *
10670 * @returns Strict VBox status code.
10671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10672 * @param u8TrapNo The trap number.
10673 * @param enmType What type is it (trap/fault/abort), software
10674 * interrupt or hardware interrupt.
10675 * @param uErrCode The error code if applicable.
10676 * @param uCr2 The CR2 value if applicable.
10677 * @param cbInstr The instruction length (only relevant for
10678 * software interrupts).
10679 */
10680VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10681 uint8_t cbInstr)
10682{
10683 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10684#ifdef DBGFTRACE_ENABLED
10685 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10686 u8TrapNo, enmType, uErrCode, uCr2);
10687#endif
10688
10689 uint32_t fFlags;
10690 switch (enmType)
10691 {
10692 case TRPM_HARDWARE_INT:
10693 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10694 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10695 uErrCode = uCr2 = 0;
10696 break;
10697
10698 case TRPM_SOFTWARE_INT:
10699 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10700 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10701 uErrCode = uCr2 = 0;
10702 break;
10703
10704 case TRPM_TRAP:
10705 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10706 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10707 if (u8TrapNo == X86_XCPT_PF)
10708 fFlags |= IEM_XCPT_FLAGS_CR2;
10709 switch (u8TrapNo)
10710 {
10711 case X86_XCPT_DF:
10712 case X86_XCPT_TS:
10713 case X86_XCPT_NP:
10714 case X86_XCPT_SS:
10715 case X86_XCPT_PF:
10716 case X86_XCPT_AC:
10717 case X86_XCPT_GP:
10718 fFlags |= IEM_XCPT_FLAGS_ERR;
10719 break;
10720 }
10721 break;
10722
10723 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10724 }
10725
10726 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10727
10728 if (pVCpu->iem.s.cActiveMappings > 0)
10729 iemMemRollback(pVCpu);
10730
10731 return rcStrict;
10732}
10733
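/** @par Usage sketch (illustrative only, not part of the original source)
 * Two hypothetical invocations of IEMInjectTrap: reflecting a \#PF into the
 * guest (error code and CR2 apply) and injecting an external hardware
 * interrupt (error code and CR2 are ignored, see the switch above).  The
 * vector, error code and the GCPtrFault variable are made up for illustration.
 * @code
 *    // Page fault with error code and faulting linear address:
 *    rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                             0x02 /*uErrCode: write, not-present*/, GCPtrFault /*uCr2*/,
 *                             0 /*cbInstr*/);
 *    // External hardware interrupt, vector 0x41:
 *    rcStrict = IEMInjectTrap(pVCpu, 0x41, TRPM_HARDWARE_INT, 0, 0, 0 /*cbInstr*/);
 * @endcode
 */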
10734
10735/**
10736 * Injects the active TRPM event.
10737 *
10738 * @returns Strict VBox status code.
10739 * @param pVCpu The cross context virtual CPU structure.
10740 */
10741VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10742{
10743#ifndef IEM_IMPLEMENTS_TASKSWITCH
10744 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10745#else
10746 uint8_t u8TrapNo;
10747 TRPMEVENT enmType;
10748 uint32_t uErrCode;
10749 RTGCUINTPTR uCr2;
10750 uint8_t cbInstr;
10751 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10752 if (RT_FAILURE(rc))
10753 return rc;
10754
10755 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10756 * ICEBP \#DB injection as a special case. */
10757 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10758#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10759 if (rcStrict == VINF_SVM_VMEXIT)
10760 rcStrict = VINF_SUCCESS;
10761#endif
10762#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10763 if (rcStrict == VINF_VMX_VMEXIT)
10764 rcStrict = VINF_SUCCESS;
10765#endif
10766 /** @todo Are there any other codes that imply the event was successfully
10767 * delivered to the guest? See @bugref{6607}. */
10768 if ( rcStrict == VINF_SUCCESS
10769 || rcStrict == VINF_IEM_RAISED_XCPT)
10770 TRPMResetTrap(pVCpu);
10771
10772 return rcStrict;
10773#endif
10774}
10775
10776
10777VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10778{
10779 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10780 return VERR_NOT_IMPLEMENTED;
10781}
10782
10783
10784VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10785{
10786 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10787 return VERR_NOT_IMPLEMENTED;
10788}
10789
10790
10791/**
10792 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10793 *
10794 * This API ASSUMES that the caller has already verified that the guest code is
10795 * allowed to access the I/O port. (The I/O port is in the DX register in the
10796 * guest state.)
10797 *
10798 * @returns Strict VBox status code.
10799 * @param pVCpu The cross context virtual CPU structure.
10800 * @param cbValue The size of the I/O port access (1, 2, or 4).
10801 * @param enmAddrMode The addressing mode.
10802 * @param fRepPrefix Indicates whether a repeat prefix is used
10803 * (doesn't matter which for this instruction).
10804 * @param cbInstr The instruction length in bytes.
10805 * @param iEffSeg The effective segment address.
10806 * @param fIoChecked Whether the access to the I/O port has been
10807 * checked or not. It's typically checked in the
10808 * HM scenario.
10809 */
10810VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10811 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10812{
10813 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10814 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10815
10816 /*
10817 * State init.
10818 */
10819 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10820
10821 /*
10822 * Switch orgy for getting to the right handler.
10823 */
10824 VBOXSTRICTRC rcStrict;
10825 if (fRepPrefix)
10826 {
10827 switch (enmAddrMode)
10828 {
10829 case IEMMODE_16BIT:
10830 switch (cbValue)
10831 {
10832 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10833 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10834 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10835 default:
10836 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10837 }
10838 break;
10839
10840 case IEMMODE_32BIT:
10841 switch (cbValue)
10842 {
10843 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10844 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10845 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10846 default:
10847 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10848 }
10849 break;
10850
10851 case IEMMODE_64BIT:
10852 switch (cbValue)
10853 {
10854 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10855 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10856 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10857 default:
10858 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10859 }
10860 break;
10861
10862 default:
10863 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10864 }
10865 }
10866 else
10867 {
10868 switch (enmAddrMode)
10869 {
10870 case IEMMODE_16BIT:
10871 switch (cbValue)
10872 {
10873 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10874 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10875 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10876 default:
10877 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10878 }
10879 break;
10880
10881 case IEMMODE_32BIT:
10882 switch (cbValue)
10883 {
10884 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10885 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10886 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10887 default:
10888 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10889 }
10890 break;
10891
10892 case IEMMODE_64BIT:
10893 switch (cbValue)
10894 {
10895 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10896 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10897 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10898 default:
10899 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10900 }
10901 break;
10902
10903 default:
10904 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10905 }
10906 }
10907
10908 if (pVCpu->iem.s.cActiveMappings)
10909 iemMemRollback(pVCpu);
10910
10911 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10912}
10913
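/** @par Usage sketch (illustrative only, not part of the original source)
 * How an HM exit handler might forward a "rep outsb" string I/O exit here.
 * The instruction length and effective segment would normally come from the
 * hardware exit information; fixed example values are used below.
 * @code
 *    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
 *                                                 true /*fRepPrefix*/, 2 /*cbInstr*/,
 *                                                 X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
 * @endcode
 */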
10914
10915/**
10916 * Interface for HM and EM for executing string I/O IN (read) instructions.
10917 *
10918 * This API ASSUMES that the caller has already verified that the guest code is
10919 * allowed to access the I/O port. (The I/O port is in the DX register in the
10920 * guest state.)
10921 *
10922 * @returns Strict VBox status code.
10923 * @param pVCpu The cross context virtual CPU structure.
10924 * @param cbValue The size of the I/O port access (1, 2, or 4).
10925 * @param enmAddrMode The addressing mode.
10926 * @param fRepPrefix Indicates whether a repeat prefix is used
10927 * (doesn't matter which for this instruction).
10928 * @param cbInstr The instruction length in bytes.
10929 * @param fIoChecked Whether the access to the I/O port has been
10930 * checked or not. It's typically checked in the
10931 * HM scenario.
10932 */
10933VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10934 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10935{
10936 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10937
10938 /*
10939 * State init.
10940 */
10941 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10942
10943 /*
10944 * Switch orgy for getting to the right handler.
10945 */
10946 VBOXSTRICTRC rcStrict;
10947 if (fRepPrefix)
10948 {
10949 switch (enmAddrMode)
10950 {
10951 case IEMMODE_16BIT:
10952 switch (cbValue)
10953 {
10954 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10955 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10956 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10957 default:
10958 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10959 }
10960 break;
10961
10962 case IEMMODE_32BIT:
10963 switch (cbValue)
10964 {
10965 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10966 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10967 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10968 default:
10969 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10970 }
10971 break;
10972
10973 case IEMMODE_64BIT:
10974 switch (cbValue)
10975 {
10976 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10977 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10978 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10979 default:
10980 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10981 }
10982 break;
10983
10984 default:
10985 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10986 }
10987 }
10988 else
10989 {
10990 switch (enmAddrMode)
10991 {
10992 case IEMMODE_16BIT:
10993 switch (cbValue)
10994 {
10995 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10996 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10997 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10998 default:
10999 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11000 }
11001 break;
11002
11003 case IEMMODE_32BIT:
11004 switch (cbValue)
11005 {
11006 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11007 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11008 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11009 default:
11010 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11011 }
11012 break;
11013
11014 case IEMMODE_64BIT:
11015 switch (cbValue)
11016 {
11017 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11018 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11019 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11020 default:
11021 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11022 }
11023 break;
11024
11025 default:
11026 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11027 }
11028 }
11029
11030 if ( pVCpu->iem.s.cActiveMappings == 0
11031 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
11032 { /* likely */ }
11033 else
11034 {
11035 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
11036 iemMemRollback(pVCpu);
11037 }
11038 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11039}
11040
11041
11042/**
11043 * Interface for raw-mode to execute an OUT (write) instruction.
11044 *
11045 * @returns Strict VBox status code.
11046 * @param pVCpu The cross context virtual CPU structure.
11047 * @param cbInstr The instruction length in bytes.
11048 * @param   u16Port     The port to write to.
11049 * @param fImm Whether the port is specified using an immediate operand or
11050 * using the implicit DX register.
11051 * @param cbReg The register size.
11052 *
11053 * @remarks In ring-0 not all of the state needs to be synced in.
11054 */
11055VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11056{
11057 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11058 Assert(cbReg <= 4 && cbReg != 3);
11059
11060 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11061 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11062 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11063 Assert(!pVCpu->iem.s.cActiveMappings);
11064 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11065}
11066
11067
11068/**
11069 * Interface for raw-mode to execute an IN (read) instruction.
11070 *
11071 * @returns Strict VBox status code.
11072 * @param pVCpu The cross context virtual CPU structure.
11073 * @param cbInstr The instruction length in bytes.
11074 * @param u16Port The port to read.
11075 * @param fImm Whether the port is specified using an immediate operand or
11076 * using the implicit DX.
11077 * @param cbReg The register size.
11078 */
11079VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11080{
11081 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11082 Assert(cbReg <= 4 && cbReg != 3);
11083
11084 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11085 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11086 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11087 Assert(!pVCpu->iem.s.cActiveMappings);
11088 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11089}
11090
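/** @par Usage sketch (illustrative only, not part of the original source)
 * Hypothetical calls for "out dx, al" and "in eax, dx" (port taken from DX, so
 * fImm is false).  u16Port stands in for whatever the caller decoded from the
 * guest state or exit information.
 * @code
 *    rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
 *    rcStrict = IEMExecDecodedIn( pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 4 /*cbReg*/);
 * @endcode
 */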
11091
11092/**
11093 * Interface for HM and EM to write to a CRx register.
11094 *
11095 * @returns Strict VBox status code.
11096 * @param pVCpu The cross context virtual CPU structure.
11097 * @param cbInstr The instruction length in bytes.
11098 * @param iCrReg The control register number (destination).
11099 * @param iGReg The general purpose register number (source).
11100 *
11101 * @remarks In ring-0 not all of the state needs to be synced in.
11102 */
11103VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11104{
11105 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11106 Assert(iCrReg < 16);
11107 Assert(iGReg < 16);
11108
11109 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11110 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11111 Assert(!pVCpu->iem.s.cActiveMappings);
11112 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11113}
11114
11115
11116/**
11117 * Interface for HM and EM to read from a CRx register.
11118 *
11119 * @returns Strict VBox status code.
11120 * @param pVCpu The cross context virtual CPU structure.
11121 * @param cbInstr The instruction length in bytes.
11122 * @param iGReg The general purpose register number (destination).
11123 * @param iCrReg The control register number (source).
11124 *
11125 * @remarks In ring-0 not all of the state needs to be synced in.
11126 */
11127VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11128{
11129 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11130 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11131 | CPUMCTX_EXTRN_APIC_TPR);
11132 Assert(iCrReg < 16);
11133 Assert(iGReg < 16);
11134
11135 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11136 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11137 Assert(!pVCpu->iem.s.cActiveMappings);
11138 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11139}
11140
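/** @par Usage sketch (illustrative only, not part of the original source)
 * Forwarding a CRx access exit; fWrite, iCrReg and iGReg are hypothetical
 * variables the caller would have decoded from the exit qualification, and the
 * instruction length of 3 matches the two-byte opcode plus ModR/M byte.
 * @code
 *    if (fWrite)  /* guest executed 'mov crN, reg' */
 *        rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, iCrReg, iGReg);
 *    else         /* guest executed 'mov reg, crN' */
 *        rcStrict = IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, iGReg, iCrReg);
 * @endcode
 */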
11141
11142/**
11143 * Interface for HM and EM to write to a DRx register.
11144 *
11145 * @returns Strict VBox status code.
11146 * @param pVCpu The cross context virtual CPU structure.
11147 * @param cbInstr The instruction length in bytes.
11148 * @param iDrReg The debug register number (destination).
11149 * @param iGReg The general purpose register number (source).
11150 *
11151 * @remarks In ring-0 not all of the state needs to be synced in.
11152 */
11153VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11154{
11155 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11156 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11157 Assert(iDrReg < 8);
11158 Assert(iGReg < 16);
11159
11160 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11161 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11162 Assert(!pVCpu->iem.s.cActiveMappings);
11163 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11164}
11165
11166
11167/**
11168 * Interface for HM and EM to read from a DRx register.
11169 *
11170 * @returns Strict VBox status code.
11171 * @param pVCpu The cross context virtual CPU structure.
11172 * @param cbInstr The instruction length in bytes.
11173 * @param iGReg The general purpose register number (destination).
11174 * @param iDrReg The debug register number (source).
11175 *
11176 * @remarks In ring-0 not all of the state needs to be synced in.
11177 */
11178VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11179{
11180 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11181 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11182 Assert(iDrReg < 8);
11183 Assert(iGReg < 16);
11184
11185 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11186 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11187 Assert(!pVCpu->iem.s.cActiveMappings);
11188 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11189}
11190
11191
11192/**
11193 * Interface for HM and EM to clear the CR0[TS] bit.
11194 *
11195 * @returns Strict VBox status code.
11196 * @param pVCpu The cross context virtual CPU structure.
11197 * @param cbInstr The instruction length in bytes.
11198 *
11199 * @remarks In ring-0 not all of the state needs to be synced in.
11200 */
11201VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11202{
11203 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11204
11205 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11206 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11207 Assert(!pVCpu->iem.s.cActiveMappings);
11208 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11209}
11210
11211
11212/**
11213 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11214 *
11215 * @returns Strict VBox status code.
11216 * @param pVCpu The cross context virtual CPU structure.
11217 * @param cbInstr The instruction length in bytes.
11218 * @param uValue The value to load into CR0.
11219 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11220 * memory operand. Otherwise pass NIL_RTGCPTR.
11221 *
11222 * @remarks In ring-0 not all of the state needs to be synced in.
11223 */
11224VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11225{
11226 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11227
11228 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11229 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11230 Assert(!pVCpu->iem.s.cActiveMappings);
11231 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11232}
11233
11234
11235/**
11236 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11237 *
11238 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11239 *
11240 * @returns Strict VBox status code.
11241 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11242 * @param cbInstr The instruction length in bytes.
11243 * @remarks In ring-0 not all of the state needs to be synced in.
11244 * @thread EMT(pVCpu)
11245 */
11246VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11247{
11248 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11249
11250 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11251 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11252 Assert(!pVCpu->iem.s.cActiveMappings);
11253 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11254}
11255
11256
11257/**
11258 * Interface for HM and EM to emulate the WBINVD instruction.
11259 *
11260 * @returns Strict VBox status code.
11261 * @param pVCpu The cross context virtual CPU structure.
11262 * @param cbInstr The instruction length in bytes.
11263 *
11264 * @remarks In ring-0 not all of the state needs to be synced in.
11265 */
11266VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11267{
11268 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11269
11270 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11271 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11272 Assert(!pVCpu->iem.s.cActiveMappings);
11273 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11274}
11275
11276
11277/**
11278 * Interface for HM and EM to emulate the INVD instruction.
11279 *
11280 * @returns Strict VBox status code.
11281 * @param pVCpu The cross context virtual CPU structure.
11282 * @param cbInstr The instruction length in bytes.
11283 *
11284 * @remarks In ring-0 not all of the state needs to be synced in.
11285 */
11286VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11287{
11288 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11289
11290 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11291 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11292 Assert(!pVCpu->iem.s.cActiveMappings);
11293 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11294}
11295
11296
11297/**
11298 * Interface for HM and EM to emulate the INVLPG instruction.
11299 *
11300 * @returns Strict VBox status code.
11301 * @retval VINF_PGM_SYNC_CR3
11302 *
11303 * @param pVCpu The cross context virtual CPU structure.
11304 * @param cbInstr The instruction length in bytes.
11305 * @param GCPtrPage The effective address of the page to invalidate.
11306 *
11307 * @remarks In ring-0 not all of the state needs to be synced in.
11308 */
11309VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11310{
11311 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11312
11313 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11314 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11315 Assert(!pVCpu->iem.s.cActiveMappings);
11316 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11317}
11318
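/** @par Usage sketch (illustrative only, not part of the original source)
 * Forwarding an INVLPG exit; GCPtrPage is a hypothetical variable holding the
 * address from the exit qualification.  A VINF_PGM_SYNC_CR3 return is simply
 * propagated here so the outer loop can resync before resuming the guest.
 * @code
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
 *    if (rcStrict == VINF_PGM_SYNC_CR3)
 *        return rcStrict; /* let the caller deal with the CR3 sync */
 * @endcode
 */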
11319
11320/**
11321 * Interface for HM and EM to emulate the INVPCID instruction.
11322 *
11323 * @returns Strict VBox status code.
11324 * @retval VINF_PGM_SYNC_CR3
11325 *
11326 * @param pVCpu The cross context virtual CPU structure.
11327 * @param cbInstr The instruction length in bytes.
11328 * @param iEffSeg The effective segment register.
11329 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11330 * @param uType The invalidation type.
11331 *
11332 * @remarks In ring-0 not all of the state needs to be synced in.
11333 */
11334VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11335 uint64_t uType)
11336{
11337 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11338
11339 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11341 Assert(!pVCpu->iem.s.cActiveMappings);
11342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11343}
11344
11345
11346/**
11347 * Interface for HM and EM to emulate the CPUID instruction.
11348 *
11349 * @returns Strict VBox status code.
11350 *
11351 * @param pVCpu The cross context virtual CPU structure.
11352 * @param cbInstr The instruction length in bytes.
11353 *
11354 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11355 */
11356VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11357{
11358 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11359 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11360
11361 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11362 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11363 Assert(!pVCpu->iem.s.cActiveMappings);
11364 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11365}
11366
11367
11368/**
11369 * Interface for HM and EM to emulate the RDPMC instruction.
11370 *
11371 * @returns Strict VBox status code.
11372 *
11373 * @param pVCpu The cross context virtual CPU structure.
11374 * @param cbInstr The instruction length in bytes.
11375 *
11376 * @remarks Not all of the state needs to be synced in.
11377 */
11378VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11379{
11380 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11381 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11382
11383 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11384 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11385 Assert(!pVCpu->iem.s.cActiveMappings);
11386 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11387}
11388
11389
11390/**
11391 * Interface for HM and EM to emulate the RDTSC instruction.
11392 *
11393 * @returns Strict VBox status code.
11394 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11395 *
11396 * @param pVCpu The cross context virtual CPU structure.
11397 * @param cbInstr The instruction length in bytes.
11398 *
11399 * @remarks Not all of the state needs to be synced in.
11400 */
11401VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11402{
11403 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11404 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11405
11406 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11408 Assert(!pVCpu->iem.s.cActiveMappings);
11409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11410}
11411
11412
11413/**
11414 * Interface for HM and EM to emulate the RDTSCP instruction.
11415 *
11416 * @returns Strict VBox status code.
11417 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11418 *
11419 * @param pVCpu The cross context virtual CPU structure.
11420 * @param cbInstr The instruction length in bytes.
11421 *
11422 * @remarks Not all of the state needs to be synced in. Recommended
11423 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11424 */
11425VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11426{
11427 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11428 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11429
11430 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11431 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11432 Assert(!pVCpu->iem.s.cActiveMappings);
11433 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11434}
11435
11436
11437/**
11438 * Interface for HM and EM to emulate the RDMSR instruction.
11439 *
11440 * @returns Strict VBox status code.
11441 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11442 *
11443 * @param pVCpu The cross context virtual CPU structure.
11444 * @param cbInstr The instruction length in bytes.
11445 *
11446 * @remarks Not all of the state needs to be synced in. Requires RCX and
11447 * (currently) all MSRs.
11448 */
11449VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11450{
11451 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11452 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11453
11454 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11456 Assert(!pVCpu->iem.s.cActiveMappings);
11457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11458}
11459
11460
11461/**
11462 * Interface for HM and EM to emulate the WRMSR instruction.
11463 *
11464 * @returns Strict VBox status code.
11465 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11466 *
11467 * @param pVCpu The cross context virtual CPU structure.
11468 * @param cbInstr The instruction length in bytes.
11469 *
11470 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11471 * and (currently) all MSRs.
11472 */
11473VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11474{
11475 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11476 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11477 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11478
11479 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11480 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11481 Assert(!pVCpu->iem.s.cActiveMappings);
11482 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11483}
11484
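/** @par Usage sketch (illustrative only, not part of the original source)
 * Forwarding RDMSR/WRMSR exits; both instructions take their operands from the
 * guest context (RCX, plus RAX/RDX for WRMSR), so only the two-byte
 * instruction length is passed.  fWrmsr is a hypothetical flag from the exit
 * handler.
 * @code
 *    rcStrict = fWrmsr
 *             ? IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/)
 *             : IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
 * @endcode
 */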
11485
11486/**
11487 * Interface for HM and EM to emulate the MONITOR instruction.
11488 *
11489 * @returns Strict VBox status code.
11490 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11491 *
11492 * @param pVCpu The cross context virtual CPU structure.
11493 * @param cbInstr The instruction length in bytes.
11494 *
11495 * @remarks Not all of the state needs to be synced in.
11496 * @remarks ASSUMES the default DS segment and that no segment override
11497 *          prefixes are used.
11498 */
11499VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11500{
11501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11502 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11503
11504 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11505 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11506 Assert(!pVCpu->iem.s.cActiveMappings);
11507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11508}
11509
11510
11511/**
11512 * Interface for HM and EM to emulate the MWAIT instruction.
11513 *
11514 * @returns Strict VBox status code.
11515 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11516 *
11517 * @param pVCpu The cross context virtual CPU structure.
11518 * @param cbInstr The instruction length in bytes.
11519 *
11520 * @remarks Not all of the state needs to be synced in.
11521 */
11522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11523{
11524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11525 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11526
11527 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11528 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11529 Assert(!pVCpu->iem.s.cActiveMappings);
11530 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11531}
11532
11533
11534/**
11535 * Interface for HM and EM to emulate the HLT instruction.
11536 *
11537 * @returns Strict VBox status code.
11538 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11539 *
11540 * @param pVCpu The cross context virtual CPU structure.
11541 * @param cbInstr The instruction length in bytes.
11542 *
11543 * @remarks Not all of the state needs to be synced in.
11544 */
11545VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11546{
11547 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11548
11549 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11550 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11551 Assert(!pVCpu->iem.s.cActiveMappings);
11552 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11553}
11554
11555
11556/**
11557 * Checks if IEM is in the process of delivering an event (interrupt or
11558 * exception).
11559 *
11560 * @returns true if we're in the process of raising an interrupt or exception,
11561 * false otherwise.
11562 * @param pVCpu The cross context virtual CPU structure.
11563 * @param puVector Where to store the vector associated with the
11564 * currently delivered event, optional.
11565 * @param   pfFlags         Where to store the event delivery flags (see
11566 * IEM_XCPT_FLAGS_XXX), optional.
11567 * @param puErr Where to store the error code associated with the
11568 * event, optional.
11569 * @param puCr2 Where to store the CR2 associated with the event,
11570 * optional.
11571 * @remarks The caller should check the flags to determine if the error code and
11572 * CR2 are valid for the event.
11573 */
11574VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11575{
11576 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11577 if (fRaisingXcpt)
11578 {
11579 if (puVector)
11580 *puVector = pVCpu->iem.s.uCurXcpt;
11581 if (pfFlags)
11582 *pfFlags = pVCpu->iem.s.fCurXcpt;
11583 if (puErr)
11584 *puErr = pVCpu->iem.s.uCurXcptErr;
11585 if (puCr2)
11586 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11587 }
11588 return fRaisingXcpt;
11589}
11590
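/** @par Usage sketch (illustrative only, not part of the original source)
 * Querying the event IEM is currently delivering, e.g. when an exit handler
 * needs to build event re-injection info.  The flags determine whether the
 * error code and CR2 outputs are meaningful.
 * @code
 *    uint8_t uVector; uint32_t fFlags; uint32_t uErrCode; uint64_t uCr2;
 *    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *    {
 *        bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *        bool const fCr2Valid     = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *        Log2(("Delivering vector %#x (err %s, cr2 %s)\n", uVector,
 *              fErrCodeValid ? "valid" : "n/a", fCr2Valid ? "valid" : "n/a"));
 *    }
 * @endcode
 */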
11591#ifdef IN_RING3
11592
11593/**
11594 * Handles the unlikely and probably fatal merge cases.
11595 *
11596 * @returns Merged status code.
11597 * @param rcStrict Current EM status code.
11598 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11599 * with @a rcStrict.
11600 * @param iMemMap The memory mapping index. For error reporting only.
11601 * @param pVCpu The cross context virtual CPU structure of the calling
11602 * thread, for error reporting only.
11603 */
11604DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11605 unsigned iMemMap, PVMCPUCC pVCpu)
11606{
11607 if (RT_FAILURE_NP(rcStrict))
11608 return rcStrict;
11609
11610 if (RT_FAILURE_NP(rcStrictCommit))
11611 return rcStrictCommit;
11612
11613 if (rcStrict == rcStrictCommit)
11614 return rcStrictCommit;
11615
11616 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11617 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11618 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11619 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11620 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11621 return VERR_IOM_FF_STATUS_IPE;
11622}
11623
11624
11625/**
11626 * Helper for IEMR3ProcessForceFlag.
11626 * Helper for IEMR3ProcessForceFlag, merging the IEM commit status with the current EM status code.
11627 *
11628 * @returns Merged status code.
11629 * @param rcStrict Current EM status code.
11630 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11631 * with @a rcStrict.
11632 * @param iMemMap The memory mapping index. For error reporting only.
11633 * @param pVCpu The cross context virtual CPU structure of the calling
11634 * thread, for error reporting only.
11635 */
11636DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11637{
11638 /* Simple. */
11639 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11640 return rcStrictCommit;
11641
11642 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11643 return rcStrict;
11644
11645 /* EM scheduling status codes. */
11646 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11647 && rcStrict <= VINF_EM_LAST))
11648 {
11649 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11650 && rcStrictCommit <= VINF_EM_LAST))
11651 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11652 }
11653
11654 /* Unlikely */
11655 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11656}
11657
11658
11659/**
11660 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11661 *
11662 * @returns Merge between @a rcStrict and what the commit operation returned.
11663 * @param pVM The cross context VM structure.
11664 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11665 * @param rcStrict The status code returned by ring-0 or raw-mode.
11666 */
11667VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11668{
11669 /*
11670 * Reset the pending commit.
11671 */
11672 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11673 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11674 ("%#x %#x %#x\n",
11675 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11676 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11677
11678 /*
11679 * Commit the pending bounce buffers (usually just one).
11680 */
11681 unsigned cBufs = 0;
11682 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11683 while (iMemMap-- > 0)
11684 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11685 {
11686 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11687 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11688 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11689
11690 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11691 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11692 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11693
11694 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11695 {
11696 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11698 pbBuf,
11699 cbFirst,
11700 PGMACCESSORIGIN_IEM);
11701 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11702 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11703 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11704 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11705 }
11706
11707 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11708 {
11709 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11710 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11711 pbBuf + cbFirst,
11712 cbSecond,
11713 PGMACCESSORIGIN_IEM);
11714 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11715 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11716 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11717 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11718 }
11719 cBufs++;
11720 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11721 }
11722
11723 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11724 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11725 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11726 pVCpu->iem.s.cActiveMappings = 0;
11727 return rcStrict;
11728}
11729
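/** @par Usage sketch (illustrative only, not part of the original source)
 * How ring-3 force-flag processing might hand a pending bounce-buffer commit
 * over to IEMR3ProcessForceFlag; the surrounding check is a simplified
 * stand-in for the real EM force-flag handling.
 * @code
 *    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */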
11730#endif /* IN_RING3 */
11731