VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100815

Last change on this file since 100815 was 100815, checked in by vboxsync, 16 months ago

VMM/IEM: Tiny data TLB usage tweak. bugref:10369

1/* $Id: IEMAll.cpp 100815 2023-08-07 10:32:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that cause too many exits / virtualization traps.  It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, it is thought
46 * to conflict with the speed goal, as the disassembler chews on things a bit
47 * too much and leaves us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us.  This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 *
91 * The syscall logging level assignments:
92 * - Level 1: DOS and BIOS.
93 * - Level 2: Windows 3.x
94 * - Level 3: Linux.
95 */
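/*
 * Illustrative sketch (not part of the original source): how the level
 * assignments above map onto the VBox/log.h macros once LOG_GROUP is set to
 * LOG_GROUP_IEM.  The message texts and variables below are made-up placeholders.
 *
 *      Log(("iemRaiseXcptOrInt: u8Vector=%#x\n", u8Vector));       // level 1: exceptions and such
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%RX64\n", uCs, uRip));    // flow: enter/exit info
 *      Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));            // level 8: memory writes
 *      Log9(("IEM RD %RGv LB %#x\n", GCPtrMem, cbMem));            // level 9: memory reads
 *      Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));       // level 10: TLB activity
 */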
96
97/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
98#ifdef _MSC_VER
99# pragma warning(disable:4505)
100#endif
101
102
103/*********************************************************************************************************************************
104* Header Files *
105*********************************************************************************************************************************/
106#define LOG_GROUP LOG_GROUP_IEM
107#define VMCPU_INCL_CPUM_GST_CTX
108#include <VBox/vmm/iem.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/apic.h>
111#include <VBox/vmm/pdm.h>
112#include <VBox/vmm/pgm.h>
113#include <VBox/vmm/iom.h>
114#include <VBox/vmm/em.h>
115#include <VBox/vmm/hm.h>
116#include <VBox/vmm/nem.h>
117#include <VBox/vmm/gim.h>
118#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
119# include <VBox/vmm/em.h>
120# include <VBox/vmm/hm_svm.h>
121#endif
122#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
123# include <VBox/vmm/hmvmxinline.h>
124#endif
125#include <VBox/vmm/tm.h>
126#include <VBox/vmm/dbgf.h>
127#include <VBox/vmm/dbgftrace.h>
128#include "IEMInternal.h"
129#include <VBox/vmm/vmcc.h>
130#include <VBox/log.h>
131#include <VBox/err.h>
132#include <VBox/param.h>
133#include <VBox/dis.h>
134#include <iprt/asm-math.h>
135#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
136# include <iprt/asm-amd64-x86.h>
137#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
138# include <iprt/asm-arm.h>
139#endif
140#include <iprt/assert.h>
141#include <iprt/string.h>
142#include <iprt/x86.h>
143
144#include "IEMInline.h"
145
146
147/*********************************************************************************************************************************
148* Structures and Typedefs *
149*********************************************************************************************************************************/
150/**
151 * CPU exception classes.
152 */
153typedef enum IEMXCPTCLASS
154{
155 IEMXCPTCLASS_BENIGN,
156 IEMXCPTCLASS_CONTRIBUTORY,
157 IEMXCPTCLASS_PAGE_FAULT,
158 IEMXCPTCLASS_DOUBLE_FAULT
159} IEMXCPTCLASS;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
165#if defined(IEM_LOG_MEMORY_WRITES)
166/** What IEM just wrote. */
167uint8_t g_abIemWrote[256];
168/** How much IEM just wrote. */
169size_t g_cbIemWrote;
170#endif
171
172
173/*********************************************************************************************************************************
174* Internal Functions *
175*********************************************************************************************************************************/
176static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
177 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
178
179
180/**
181 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
182 * path.
183 *
184 * @returns IEM_F_BRK_PENDING_XXX or zero.
185 * @param pVCpu The cross context virtual CPU structure of the
186 * calling thread.
187 *
188 * @note Don't call directly, use iemCalcExecDbgFlags instead.
189 */
190uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
191{
192 uint32_t fExec = 0;
193
194 /*
195 * Process guest breakpoints.
196 */
197#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
198 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
199 { \
200 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
201 { \
202 case X86_DR7_RW_EO: \
203 fExec |= IEM_F_PENDING_BRK_INSTR; \
204 break; \
205 case X86_DR7_RW_WO: \
206 case X86_DR7_RW_RW: \
207 fExec |= IEM_F_PENDING_BRK_DATA; \
208 break; \
209 case X86_DR7_RW_IO: \
210 fExec |= IEM_F_PENDING_BRK_X86_IO; \
211 break; \
212 } \
213 } \
214 } while (0)
215
216 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
217 if (fGstDr7 & X86_DR7_ENABLED_MASK)
218 {
219 PROCESS_ONE_BP(fGstDr7, 0);
220 PROCESS_ONE_BP(fGstDr7, 1);
221 PROCESS_ONE_BP(fGstDr7, 2);
222 PROCESS_ONE_BP(fGstDr7, 3);
223 }
224
225 /*
226 * Process hypervisor breakpoints.
227 */
228 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
229 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
230 {
231 PROCESS_ONE_BP(fHyperDr7, 0);
232 PROCESS_ONE_BP(fHyperDr7, 1);
233 PROCESS_ONE_BP(fHyperDr7, 2);
234 PROCESS_ONE_BP(fHyperDr7, 3);
235 }
236
237 return fExec;
238}
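/*
 * Illustrative note (not part of the original source): a few concrete DR7
 * values and what the loop above folds them into, using the standard IA-32
 * encoding where bits 0/1 are L0/G0 and bits 17:16 hold the R/W field for
 * breakpoint 0 (0=execute, 1=write, 2=I/O, 3=read/write):
 *
 *      dr7 = 0x00000003;   // L0+G0 set, R/W0=0 (execute)     -> IEM_F_PENDING_BRK_INSTR
 *      dr7 = 0x00030003;   // L0+G0 set, R/W0=3 (read/write)  -> IEM_F_PENDING_BRK_DATA
 *      dr7 = 0x00020003;   // L0+G0 set, R/W0=2 (I/O, CR4.DE) -> IEM_F_PENDING_BRK_X86_IO
 */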
239
240
241/**
242 * Initializes the decoder state.
243 *
244 * iemReInitDecoder is mostly a copy of this function.
245 *
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @param fExecOpts Optional execution flags:
249 * - IEM_F_BYPASS_HANDLERS
250 * - IEM_F_X86_DISREGARD_LOCK
251 */
252DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
253{
254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
255 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
264
265 /* Execution state: */
266 uint32_t fExec;
267 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
268
269 /* Decoder state: */
270 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
271 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
272 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
273 {
274 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
275 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
276 }
277 else
278 {
279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
281 }
282 pVCpu->iem.s.fPrefixes = 0;
283 pVCpu->iem.s.uRexReg = 0;
284 pVCpu->iem.s.uRexB = 0;
285 pVCpu->iem.s.uRexIndex = 0;
286 pVCpu->iem.s.idxPrefix = 0;
287 pVCpu->iem.s.uVex3rdReg = 0;
288 pVCpu->iem.s.uVexLength = 0;
289 pVCpu->iem.s.fEvexStuff = 0;
290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
291#ifdef IEM_WITH_CODE_TLB
292 pVCpu->iem.s.pbInstrBuf = NULL;
293 pVCpu->iem.s.offInstrNextByte = 0;
294 pVCpu->iem.s.offCurInstrStart = 0;
295# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
296 pVCpu->iem.s.offOpcode = 0;
297# endif
298# ifdef VBOX_STRICT
299 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
300 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
301 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
302 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
303# endif
304#else
305 pVCpu->iem.s.offOpcode = 0;
306 pVCpu->iem.s.cbOpcode = 0;
307#endif
308 pVCpu->iem.s.offModRm = 0;
309 pVCpu->iem.s.cActiveMappings = 0;
310 pVCpu->iem.s.iNextMapping = 0;
311 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
312
313#ifdef DBGFTRACE_ENABLED
314 switch (IEM_GET_CPU_MODE(pVCpu))
315 {
316 case IEMMODE_64BIT:
317 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
318 break;
319 case IEMMODE_32BIT:
320 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
321 break;
322 case IEMMODE_16BIT:
323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
324 break;
325 }
326#endif
327}
328
329
330/**
331 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
332 *
333 * This is mostly a copy of iemInitDecoder.
334 *
335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
336 */
337DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
338{
339 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
344 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
345 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
346 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
347 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
348
349 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
350 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
351 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
352
353 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
354 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
355 pVCpu->iem.s.enmEffAddrMode = enmMode;
356 if (enmMode != IEMMODE_64BIT)
357 {
358 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
359 pVCpu->iem.s.enmEffOpSize = enmMode;
360 }
361 else
362 {
363 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
364 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
365 }
366 pVCpu->iem.s.fPrefixes = 0;
367 pVCpu->iem.s.uRexReg = 0;
368 pVCpu->iem.s.uRexB = 0;
369 pVCpu->iem.s.uRexIndex = 0;
370 pVCpu->iem.s.idxPrefix = 0;
371 pVCpu->iem.s.uVex3rdReg = 0;
372 pVCpu->iem.s.uVexLength = 0;
373 pVCpu->iem.s.fEvexStuff = 0;
374 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
375#ifdef IEM_WITH_CODE_TLB
376 if (pVCpu->iem.s.pbInstrBuf)
377 {
378 uint64_t off = (enmMode == IEMMODE_64BIT
379 ? pVCpu->cpum.GstCtx.rip
380 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
381 - pVCpu->iem.s.uInstrBufPc;
382 if (off < pVCpu->iem.s.cbInstrBufTotal)
383 {
384 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
385 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
386 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
387 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
388 else
389 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
390 }
391 else
392 {
393 pVCpu->iem.s.pbInstrBuf = NULL;
394 pVCpu->iem.s.offInstrNextByte = 0;
395 pVCpu->iem.s.offCurInstrStart = 0;
396 pVCpu->iem.s.cbInstrBuf = 0;
397 pVCpu->iem.s.cbInstrBufTotal = 0;
398 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
399 }
400 }
401 else
402 {
403 pVCpu->iem.s.offInstrNextByte = 0;
404 pVCpu->iem.s.offCurInstrStart = 0;
405 pVCpu->iem.s.cbInstrBuf = 0;
406 pVCpu->iem.s.cbInstrBufTotal = 0;
407# ifdef VBOX_STRICT
408 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
409# endif
410 }
411# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
412 pVCpu->iem.s.offOpcode = 0;
413# endif
414#else /* !IEM_WITH_CODE_TLB */
415 pVCpu->iem.s.cbOpcode = 0;
416 pVCpu->iem.s.offOpcode = 0;
417#endif /* !IEM_WITH_CODE_TLB */
418 pVCpu->iem.s.offModRm = 0;
419 Assert(pVCpu->iem.s.cActiveMappings == 0);
420 pVCpu->iem.s.iNextMapping = 0;
421 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
422 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
423
424#ifdef DBGFTRACE_ENABLED
425 switch (enmMode)
426 {
427 case IEMMODE_64BIT:
428 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
429 break;
430 case IEMMODE_32BIT:
431 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
432 break;
433 case IEMMODE_16BIT:
434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
435 break;
436 }
437#endif
438}
439
440
441
442/**
443 * Prefetch opcodes the first time when starting executing.
444 *
445 * @returns Strict VBox status code.
446 * @param pVCpu The cross context virtual CPU structure of the
447 * calling thread.
448 * @param fExecOpts Optional execution flags:
449 * - IEM_F_BYPASS_HANDLERS
450 * - IEM_F_X86_DISREGARD_LOCK
451 */
452static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
453{
454 iemInitDecoder(pVCpu, fExecOpts);
455
456#ifndef IEM_WITH_CODE_TLB
457 /*
458 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
459 *
460 * First translate CS:rIP to a physical address.
461 *
462 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
463 * all relevant bytes from the first page, as it ASSUMES it's only ever
464 * called for dealing with CS.LIM, page crossing and instructions that
465 * are too long.
466 */
467 uint32_t cbToTryRead;
468 RTGCPTR GCPtrPC;
469 if (IEM_IS_64BIT_CODE(pVCpu))
470 {
471 cbToTryRead = GUEST_PAGE_SIZE;
472 GCPtrPC = pVCpu->cpum.GstCtx.rip;
473 if (IEM_IS_CANONICAL(GCPtrPC))
474 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
475 else
476 return iemRaiseGeneralProtectionFault0(pVCpu);
477 }
478 else
479 {
480 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
481 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
482 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
483 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
484 else
485 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
486 if (cbToTryRead) { /* likely */ }
487 else /* overflowed */
488 {
489 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
490 cbToTryRead = UINT32_MAX;
491 }
492 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
493 Assert(GCPtrPC <= UINT32_MAX);
494 }
495
496 PGMPTWALK Walk;
497 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
498 if (RT_SUCCESS(rc))
499 Assert(Walk.fSucceeded); /* probable. */
500 else
501 {
502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
503# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
506# endif
507 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
508 }
509 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
510 else
511 {
512 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
513# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
516# endif
517 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
518 }
519 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
520 else
521 {
522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
523# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
524 if (Walk.fFailed & PGM_WALKFAIL_EPT)
525 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
526# endif
527 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
528 }
529 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
530 /** @todo Check reserved bits and such stuff. PGM is better at doing
531 * that, so do it when implementing the guest virtual address
532 * TLB... */
533
534 /*
535 * Read the bytes at this address.
536 */
537 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
538 if (cbToTryRead > cbLeftOnPage)
539 cbToTryRead = cbLeftOnPage;
540 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
541 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
542
543 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
544 {
545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
547 { /* likely */ }
548 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
549 {
550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
551 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
552 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
553 }
554 else
555 {
556 Log((RT_SUCCESS(rcStrict)
557 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
558 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
559 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
560 return rcStrict;
561 }
562 }
563 else
564 {
565 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
566 if (RT_SUCCESS(rc))
567 { /* likely */ }
568 else
569 {
570 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
571 GCPtrPC, GCPhys, cbToTryRead, rc));
572 return rc;
573 }
574 }
575 pVCpu->iem.s.cbOpcode = cbToTryRead;
576#endif /* !IEM_WITH_CODE_TLB */
577 return VINF_SUCCESS;
578}
579
580
581/**
582 * Invalidates the IEM TLBs.
583 *
584 * This is called internally as well as by PGM when moving GC mappings.
585 *
586 * @param pVCpu The cross context virtual CPU structure of the calling
587 * thread.
588 */
589VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
590{
591#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
592 Log10(("IEMTlbInvalidateAll\n"));
593# ifdef IEM_WITH_CODE_TLB
594 pVCpu->iem.s.cbInstrBufTotal = 0;
595 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
596 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
597 { /* very likely */ }
598 else
599 {
600 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
601 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
602 while (i-- > 0)
603 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
604 }
605# endif
606
607# ifdef IEM_WITH_DATA_TLB
608 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
609 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
610 { /* very likely */ }
611 else
612 {
613 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
614 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
615 while (i-- > 0)
616 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
617 }
618# endif
619#else
620 RT_NOREF(pVCpu);
621#endif
622}
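/*
 * Illustrative note (not part of the original source): entry tags embed the TLB
 * revision, so bumping IEMTLB::uTlbRevision above invalidates every entry in
 * O(1); individual entries only need scrubbing on the rare rollover handled in
 * the else branches.  A lookup therefore goes roughly like this (GCPtrMem is a
 * hypothetical guest linear address):
 *
 *      uint64_t const     uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtrMem);
 *      PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
 *      bool const         fHit  = pTlbe->uTag == uTag; // entries tagged with an older revision never compare equal
 */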
623
624
625/**
626 * Invalidates a page in the TLBs.
627 *
628 * @param pVCpu The cross context virtual CPU structure of the calling
629 * thread.
630 * @param GCPtr The address of the page to invalidate
631 * @thread EMT(pVCpu)
632 */
633VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
634{
635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
636 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
637 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
638 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
639 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
640
641# ifdef IEM_WITH_CODE_TLB
642 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
643 {
644 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
645 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
646 pVCpu->iem.s.cbInstrBufTotal = 0;
647 }
648# endif
649
650# ifdef IEM_WITH_DATA_TLB
651 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
652 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
653# endif
654#else
655 NOREF(pVCpu); NOREF(GCPtr);
656#endif
657}
658
659
660#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
661/**
662 * Invalidates both TLBs in a slow fashion following a rollover.
663 *
664 * Worker for IEMTlbInvalidateAllPhysical,
665 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
666 * iemMemMapJmp and others.
667 *
668 * @thread EMT(pVCpu)
669 */
670static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
671{
672 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
673 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
674 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
675
676 unsigned i;
677# ifdef IEM_WITH_CODE_TLB
678 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
679 while (i-- > 0)
680 {
681 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
682 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
683 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
684 }
685# endif
686# ifdef IEM_WITH_DATA_TLB
687 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
688 while (i-- > 0)
689 {
690 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
691 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
692 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
693 }
694# endif
695
696}
697#endif
698
699
700/**
701 * Invalidates the host physical aspects of the IEM TLBs.
702 *
703 * This is called internally as well as by PGM when moving GC mappings.
704 *
705 * @param pVCpu The cross context virtual CPU structure of the calling
706 * thread.
707 * @note Currently not used.
708 */
709VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
710{
711#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
712 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
713 Log10(("IEMTlbInvalidateAllPhysical\n"));
714
715# ifdef IEM_WITH_CODE_TLB
716 pVCpu->iem.s.cbInstrBufTotal = 0;
717# endif
718 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
719 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
720 {
721 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
722 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
723 }
724 else
725 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
726#else
727 NOREF(pVCpu);
728#endif
729}
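/*
 * Illustrative note (not part of the original source): the physical revision
 * works like the virtual one above - entries cache the revision they were
 * filled at in the IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev, so after the
 * bump the host mapping info is simply re-resolved on the next use, e.g.:
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *      { } // pbMappingR3 and the IEMTLBE_F_PG_XXX bits are still valid
 *      else
 *      { } // stale: query PGM again (see the code TLB miss handling further down)
 */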
730
731
732/**
733 * Invalidates the host physical aspects of the IEM TLBs.
734 *
735 * This is called internally as well as by PGM when moving GC mappings.
736 *
737 * @param pVM The cross context VM structure.
738 * @param idCpuCaller The ID of the calling EMT if available to the caller,
739 * otherwise NIL_VMCPUID.
740 *
741 * @remarks Caller holds the PGM lock.
742 */
743VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
744{
745#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
746 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
747 if (pVCpuCaller)
748 VMCPU_ASSERT_EMT(pVCpuCaller);
749 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
750
751 VMCC_FOR_EACH_VMCPU(pVM)
752 {
753# ifdef IEM_WITH_CODE_TLB
754 if (pVCpuCaller == pVCpu)
755 pVCpu->iem.s.cbInstrBufTotal = 0;
756# endif
757
758 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
759 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
760 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
761 { /* likely */}
762 else if (pVCpuCaller == pVCpu)
763 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
764 else
765 {
766 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
767 continue;
768 }
769 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
770 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
771 }
772 VMCC_FOR_EACH_VMCPU_END(pVM);
773
774#else
775 RT_NOREF(pVM, idCpuCaller);
776#endif
777}
778
779
780/**
781 * Flushes the prefetch buffer, light version.
782 */
783void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
784{
785#ifndef IEM_WITH_CODE_TLB
786 pVCpu->iem.s.cbOpcode = cbInstr;
787#else
788 RT_NOREF(pVCpu, cbInstr);
789#endif
790}
791
792
793/**
794 * Flushes the prefetch buffer, heavy version.
795 */
796void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
797{
798#ifndef IEM_WITH_CODE_TLB
799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
800#elif 1
801 pVCpu->iem.s.pbInstrBuf = NULL;
802 pVCpu->iem.s.cbInstrBufTotal = 0;
803 RT_NOREF(cbInstr);
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810
811#ifdef IEM_WITH_CODE_TLB
812
813/**
814 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
815 * failure (via longjmp).
816 *
817 * We end up here for a number of reasons:
818 * - pbInstrBuf isn't yet initialized.
819 * - Advancing beyond the buffer boundary (e.g. crossing a page).
820 * - Advancing beyond the CS segment limit.
821 * - Fetching from non-mappable page (e.g. MMIO).
822 *
823 * @param pVCpu The cross context virtual CPU structure of the
824 * calling thread.
825 * @param pvDst Where to return the bytes.
826 * @param cbDst Number of bytes to read. A value of zero is
827 * allowed for initializing pbInstrBuf (the
828 * recompiler does this). In this case it is best
829 * to set pbInstrBuf to NULL prior to the call.
830 */
831void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
832{
833# ifdef IN_RING3
834 for (;;)
835 {
836 Assert(cbDst <= 8);
837 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
838
839 /*
840 * We might have a partial buffer match, deal with that first to make the
841 * rest simpler. This is the first part of the cross page/buffer case.
842 */
843 if (pVCpu->iem.s.pbInstrBuf != NULL)
844 {
845 if (offBuf < pVCpu->iem.s.cbInstrBuf)
846 {
847 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
848 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
849 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
850
851 cbDst -= cbCopy;
852 pvDst = (uint8_t *)pvDst + cbCopy;
853 offBuf += cbCopy;
854 pVCpu->iem.s.offInstrNextByte += offBuf;
855 }
856 }
857
858 /*
859 * Check segment limit, figuring how much we're allowed to access at this point.
860 *
861 * We will fault immediately if RIP is past the segment limit / in non-canonical
862 * territory. If we do continue, there are one or more bytes to read before we
863 * end up in trouble and we need to do that first before faulting.
864 */
865 RTGCPTR GCPtrFirst;
866 uint32_t cbMaxRead;
867 if (IEM_IS_64BIT_CODE(pVCpu))
868 {
869 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
870 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
871 { /* likely */ }
872 else
873 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
874 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
875 }
876 else
877 {
878 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
879 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
880 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
881 { /* likely */ }
882 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
883 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
884 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
885 if (cbMaxRead != 0)
886 { /* likely */ }
887 else
888 {
889 /* Overflowed because address is 0 and limit is max. */
890 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
891 cbMaxRead = X86_PAGE_SIZE;
892 }
893 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
894 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
895 if (cbMaxRead2 < cbMaxRead)
896 cbMaxRead = cbMaxRead2;
897 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
898 }
899
900 /*
901 * Get the TLB entry for this piece of code.
902 */
903 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
904 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
905 if (pTlbe->uTag == uTag)
906 {
907 /* likely when executing lots of code, otherwise unlikely */
908# ifdef VBOX_WITH_STATISTICS
909 pVCpu->iem.s.CodeTlb.cTlbHits++;
910# endif
911 }
912 else
913 {
914 pVCpu->iem.s.CodeTlb.cTlbMisses++;
915 PGMPTWALK Walk;
916 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
917 if (RT_FAILURE(rc))
918 {
919#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
920 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
921 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
922#endif
923 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
924 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
925 }
926
927 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
928 Assert(Walk.fSucceeded);
929 pTlbe->uTag = uTag;
930 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
931 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
932 pTlbe->GCPhys = Walk.GCPhys;
933 pTlbe->pbMappingR3 = NULL;
934 }
935
936 /*
937 * Check TLB page table level access flags.
938 */
939 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
940 {
941 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
942 {
943 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
944 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
945 }
946 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
947 {
948 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
949 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
950 }
951 }
952
953 /*
954 * Look up the physical page info if necessary.
955 */
956 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
957 { /* not necessary */ }
958 else
959 {
960 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
961 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
962 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
963 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
964 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
965 { /* likely */ }
966 else
967 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
968 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
969 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
970 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
971 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
972 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
973 }
974
975# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
976 /*
977 * Try do a direct read using the pbMappingR3 pointer.
978 */
979 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
980 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
981 {
982 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
983 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
984 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
985 {
986 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
987 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
988 }
989 else
990 {
991 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
992 if (cbInstr + (uint32_t)cbDst <= 15)
993 {
994 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
995 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
996 }
997 else
998 {
999 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1001 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1002 }
1003 }
1004 if (cbDst <= cbMaxRead)
1005 {
1006 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1007 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1008
1009 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1010 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1011 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1012 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1013 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1014 return;
1015 }
1016 pVCpu->iem.s.pbInstrBuf = NULL;
1017
1018 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1019 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1020 }
1021# else
1022# error "refactor as needed"
1023 /*
1024 * If there is no special read handling, we can read a bit more and
1025 * put it in the prefetch buffer.
1026 */
1027 if ( cbDst < cbMaxRead
1028 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1029 {
1030 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1031 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1032 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1033 { /* likely */ }
1034 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1035 {
1036 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1039 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1040 }
1041 else
1042 {
1043 Log((RT_SUCCESS(rcStrict)
1044 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1045 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1046 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1047 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1048 }
1049 }
1050# endif
1051 /*
1052 * Special read handling, so only read exactly what's needed.
1053 * This is a highly unlikely scenario.
1054 */
1055 else
1056 {
1057 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1058
1059 /* Check instruction length. */
1060 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1061 if (RT_LIKELY(cbInstr + cbDst <= 15))
1062 { /* likely */ }
1063 else
1064 {
1065 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1066 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1067 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1068 }
1069
1070 /* Do the reading. */
1071 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1072 if (cbToRead > 0)
1073 {
1074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1075 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1076 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1077 { /* likely */ }
1078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1083 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1084 }
1085 else
1086 {
1087 Log((RT_SUCCESS(rcStrict)
1088 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1089 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1090 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1091 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1092 }
1093 }
1094
1095 /* Update the state and probably return. */
1096 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1097 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1098 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1099
1100 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1101 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1102 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1103 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1104 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1105 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1106 pVCpu->iem.s.pbInstrBuf = NULL;
1107 if (cbToRead == cbDst)
1108 return;
1109 }
1110
1111 /*
1112 * More to read, loop.
1113 */
1114 cbDst -= cbMaxRead;
1115 pvDst = (uint8_t *)pvDst + cbMaxRead;
1116 }
1117# else /* !IN_RING3 */
1118 RT_NOREF(pvDst, cbDst);
1119 if (pvDst || cbDst)
1120 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1121# endif /* !IN_RING3 */
1122}
1123
1124#else /* !IEM_WITH_CODE_TLB */
1125
1126/**
1127 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1128 * exception if it fails.
1129 *
1130 * @returns Strict VBox status code.
1131 * @param pVCpu The cross context virtual CPU structure of the
1132 * calling thread.
1133 * @param cbMin The minimum number of bytes relative to offOpcode
1134 * that must be read.
1135 */
1136VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1137{
1138 /*
1139 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1140 *
1141 * First translate CS:rIP to a physical address.
1142 */
1143 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1144 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1145 uint8_t const cbLeft = cbOpcode - offOpcode;
1146 Assert(cbLeft < cbMin);
1147 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1148
1149 uint32_t cbToTryRead;
1150 RTGCPTR GCPtrNext;
1151 if (IEM_IS_64BIT_CODE(pVCpu))
1152 {
1153 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1154 if (!IEM_IS_CANONICAL(GCPtrNext))
1155 return iemRaiseGeneralProtectionFault0(pVCpu);
1156 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1157 }
1158 else
1159 {
1160 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1161 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1162 GCPtrNext32 += cbOpcode;
1163 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1164 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1165 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1166 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1167 if (!cbToTryRead) /* overflowed */
1168 {
1169 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1170 cbToTryRead = UINT32_MAX;
1171 /** @todo check out wrapping around the code segment. */
1172 }
1173 if (cbToTryRead < cbMin - cbLeft)
1174 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1175 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1176
1177 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1178 if (cbToTryRead > cbLeftOnPage)
1179 cbToTryRead = cbLeftOnPage;
1180 }
1181
1182 /* Restrict to opcode buffer space.
1183
1184 We're making ASSUMPTIONS here based on work done previously in
1185 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1186 be fetched in case of an instruction crossing two pages. */
1187 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1188 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1189 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1190 { /* likely */ }
1191 else
1192 {
1193 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1194 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1195 return iemRaiseGeneralProtectionFault0(pVCpu);
1196 }
1197
1198 PGMPTWALK Walk;
1199 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1200 if (RT_FAILURE(rc))
1201 {
1202 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1204 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1205 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1206#endif
1207 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1208 }
1209 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1210 {
1211 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1213 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1214 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1215#endif
1216 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1217 }
1218 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1219 {
1220 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1221#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1222 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1223 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1224#endif
1225 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1226 }
1227 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1228 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1229 /** @todo Check reserved bits and such stuff. PGM is better at doing
1230 * that, so do it when implementing the guest virtual address
1231 * TLB... */
1232
1233 /*
1234 * Read the bytes at this address.
1235 *
1236 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1237 * and since PATM should only patch the start of an instruction there
1238 * should be no need to check again here.
1239 */
1240 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1241 {
1242 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1243 cbToTryRead, PGMACCESSORIGIN_IEM);
1244 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1245 { /* likely */ }
1246 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1247 {
1248 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1249 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1250 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1251 }
1252 else
1253 {
1254 Log((RT_SUCCESS(rcStrict)
1255 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1256 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1257 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1258 return rcStrict;
1259 }
1260 }
1261 else
1262 {
1263 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1264 if (RT_SUCCESS(rc))
1265 { /* likely */ }
1266 else
1267 {
1268 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1269 return rc;
1270 }
1271 }
1272 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1273 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1274
1275 return VINF_SUCCESS;
1276}
1277
1278#endif /* !IEM_WITH_CODE_TLB */
1279#ifndef IEM_WITH_SETJMP
1280
1281/**
1282 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1283 *
1284 * @returns Strict VBox status code.
1285 * @param pVCpu The cross context virtual CPU structure of the
1286 * calling thread.
1287 * @param pb Where to return the opcode byte.
1288 */
1289VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1290{
1291 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1292 if (rcStrict == VINF_SUCCESS)
1293 {
1294 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1295 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1296 pVCpu->iem.s.offOpcode = offOpcode + 1;
1297 }
1298 else
1299 *pb = 0;
1300 return rcStrict;
1301}
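/*
 * Illustrative sketch (not part of the original source): the kind of inline
 * fast path (living in IEMInline.h, not in this file) that falls back to the
 * slow helper above; the real inline may differ in details, and the name
 * iemOpcodeGetNextU8Sketch is made up.
 *
 *      DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8Sketch(PVMCPUCC pVCpu, uint8_t *pb)
 *      {
 *          uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
 *          if (RT_LIKELY(offOpcode < pVCpu->iem.s.cbOpcode))
 *          {
 *              *pb = pVCpu->iem.s.abOpcode[offOpcode];
 *              pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
 *              return VINF_SUCCESS;
 *          }
 *          return iemOpcodeGetNextU8Slow(pVCpu, pb); // the slow path above
 *      }
 */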
1302
1303#else /* IEM_WITH_SETJMP */
1304
1305/**
1306 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1307 *
1308 * @returns The opcode byte.
1309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1310 */
1311uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1312{
1313# ifdef IEM_WITH_CODE_TLB
1314 uint8_t u8;
1315 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1316 return u8;
1317# else
1318 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1319 if (rcStrict == VINF_SUCCESS)
1320 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1321 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1322# endif
1323}
1324
1325#endif /* IEM_WITH_SETJMP */
1326
1327#ifndef IEM_WITH_SETJMP
1328
1329/**
1330 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1331 *
1332 * @returns Strict VBox status code.
1333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1334 * @param pu16 Where to return the opcode word (sign-extended byte).
1335 */
1336VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1337{
1338 uint8_t u8;
1339 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1340 if (rcStrict == VINF_SUCCESS)
1341 *pu16 = (int8_t)u8;
1342 return rcStrict;
1343}
1344
1345
1346/**
1347 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 * @param pu32 Where to return the opcode dword.
1352 */
1353VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1354{
1355 uint8_t u8;
1356 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1357 if (rcStrict == VINF_SUCCESS)
1358 *pu32 = (int8_t)u8;
1359 return rcStrict;
1360}
1361
1362
1363/**
1364 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1365 *
1366 * @returns Strict VBox status code.
1367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1368 * @param pu64 Where to return the opcode qword.
1369 */
1370VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1371{
1372 uint8_t u8;
1373 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1374 if (rcStrict == VINF_SUCCESS)
1375 *pu64 = (int8_t)u8;
1376 return rcStrict;
1377}
1378
1379#endif /* !IEM_WITH_SETJMP */
1380
1381
1382#ifndef IEM_WITH_SETJMP
1383
1384/**
1385 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1386 *
1387 * @returns Strict VBox status code.
1388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1389 * @param pu16 Where to return the opcode word.
1390 */
1391VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1392{
1393 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1394 if (rcStrict == VINF_SUCCESS)
1395 {
1396 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1397# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1398 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1399# else
1400 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1401# endif
1402 pVCpu->iem.s.offOpcode = offOpcode + 2;
1403 }
1404 else
1405 *pu16 = 0;
1406 return rcStrict;
1407}
1408
1409#else /* IEM_WITH_SETJMP */
1410
1411/**
1412 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1413 *
1414 * @returns The opcode word.
1415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1416 */
1417uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1418{
1419# ifdef IEM_WITH_CODE_TLB
1420 uint16_t u16;
1421 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1422 return u16;
1423# else
1424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1425 if (rcStrict == VINF_SUCCESS)
1426 {
1427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1428 pVCpu->iem.s.offOpcode += 2;
1429# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1430 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1431# else
1432 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1433# endif
1434 }
1435 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1436# endif
1437}
1438
1439#endif /* IEM_WITH_SETJMP */
1440
1441#ifndef IEM_WITH_SETJMP
1442
1443/**
1444 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1445 *
1446 * @returns Strict VBox status code.
1447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1448 * @param pu32 Where to return the opcode double word.
1449 */
1450VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1451{
1452 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1453 if (rcStrict == VINF_SUCCESS)
1454 {
1455 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1456 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1457 pVCpu->iem.s.offOpcode = offOpcode + 2;
1458 }
1459 else
1460 *pu32 = 0;
1461 return rcStrict;
1462}
1463
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu64 Where to return the opcode quad word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu64 = 0;
1483 return rcStrict;
1484}
1485
1486#endif /* !IEM_WITH_SETJMP */
1487
1488#ifndef IEM_WITH_SETJMP
1489
1490/**
1491 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1492 *
1493 * @returns Strict VBox status code.
1494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1495 * @param pu32 Where to return the opcode dword.
1496 */
1497VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1498{
1499 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1500 if (rcStrict == VINF_SUCCESS)
1501 {
1502 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1503# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1504 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1505# else
1506 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1507 pVCpu->iem.s.abOpcode[offOpcode + 1],
1508 pVCpu->iem.s.abOpcode[offOpcode + 2],
1509 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1510# endif
1511 pVCpu->iem.s.offOpcode = offOpcode + 4;
1512 }
1513 else
1514 *pu32 = 0;
1515 return rcStrict;
1516}
1517
1518#else /* IEM_WITH_SETJMP */
1519
1520/**
1521 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1522 *
1523 * @returns The opcode dword.
1524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1525 */
1526uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1527{
1528# ifdef IEM_WITH_CODE_TLB
1529 uint32_t u32;
1530 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1531 return u32;
1532# else
1533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1534 if (rcStrict == VINF_SUCCESS)
1535 {
1536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1537 pVCpu->iem.s.offOpcode = offOpcode + 4;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 }
1547 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1548# endif
1549}
1550
1551#endif /* IEM_WITH_SETJMP */
1552
1553#ifndef IEM_WITH_SETJMP
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1557 *
1558 * @returns Strict VBox status code.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1561 */
1562VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1563{
1564 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1565 if (rcStrict == VINF_SUCCESS)
1566 {
1567 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1568 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1569 pVCpu->iem.s.abOpcode[offOpcode + 1],
1570 pVCpu->iem.s.abOpcode[offOpcode + 2],
1571 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573 }
1574 else
1575 *pu64 = 0;
1576 return rcStrict;
1577}
1578
1579
1580/**
1581 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1582 *
1583 * @returns Strict VBox status code.
1584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1585 * @param pu64 Where to return the opcode qword.
1586 */
1587VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1588{
1589 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1590 if (rcStrict == VINF_SUCCESS)
1591 {
1592 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1593 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1594 pVCpu->iem.s.abOpcode[offOpcode + 1],
1595 pVCpu->iem.s.abOpcode[offOpcode + 2],
1596 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1597 pVCpu->iem.s.offOpcode = offOpcode + 4;
1598 }
1599 else
1600 *pu64 = 0;
1601 return rcStrict;
1602}
1603
1604#endif /* !IEM_WITH_SETJMP */
1605
1606#ifndef IEM_WITH_SETJMP
1607
1608/**
1609 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1610 *
1611 * @returns Strict VBox status code.
1612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1613 * @param pu64 Where to return the opcode qword.
1614 */
1615VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1616{
1617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1618 if (rcStrict == VINF_SUCCESS)
1619 {
1620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1621# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1622 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1623# else
1624 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1625 pVCpu->iem.s.abOpcode[offOpcode + 1],
1626 pVCpu->iem.s.abOpcode[offOpcode + 2],
1627 pVCpu->iem.s.abOpcode[offOpcode + 3],
1628 pVCpu->iem.s.abOpcode[offOpcode + 4],
1629 pVCpu->iem.s.abOpcode[offOpcode + 5],
1630 pVCpu->iem.s.abOpcode[offOpcode + 6],
1631 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1632# endif
1633 pVCpu->iem.s.offOpcode = offOpcode + 8;
1634 }
1635 else
1636 *pu64 = 0;
1637 return rcStrict;
1638}
1639
1640#else /* IEM_WITH_SETJMP */
1641
1642/**
1643 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1644 *
1645 * @returns The opcode qword.
1646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1647 */
1648uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1649{
1650# ifdef IEM_WITH_CODE_TLB
1651 uint64_t u64;
1652 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1653 return u64;
1654# else
1655 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1656 if (rcStrict == VINF_SUCCESS)
1657 {
1658 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1659 pVCpu->iem.s.offOpcode = offOpcode + 8;
1660# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1661 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1662# else
1663 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1664 pVCpu->iem.s.abOpcode[offOpcode + 1],
1665 pVCpu->iem.s.abOpcode[offOpcode + 2],
1666 pVCpu->iem.s.abOpcode[offOpcode + 3],
1667 pVCpu->iem.s.abOpcode[offOpcode + 4],
1668 pVCpu->iem.s.abOpcode[offOpcode + 5],
1669 pVCpu->iem.s.abOpcode[offOpcode + 6],
1670 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1671# endif
1672 }
1673 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1674# endif
1675}
1676
1677#endif /* IEM_WITH_SETJMP */
1678
1679
1680
1681/** @name Misc Worker Functions.
1682 * @{
1683 */
1684
1685/**
1686 * Gets the exception class for the specified exception vector.
1687 *
1688 * @returns The class of the specified exception.
1689 * @param uVector The exception vector.
1690 */
1691static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1692{
1693 Assert(uVector <= X86_XCPT_LAST);
1694 switch (uVector)
1695 {
1696 case X86_XCPT_DE:
1697 case X86_XCPT_TS:
1698 case X86_XCPT_NP:
1699 case X86_XCPT_SS:
1700 case X86_XCPT_GP:
1701 case X86_XCPT_SX: /* AMD only */
1702 return IEMXCPTCLASS_CONTRIBUTORY;
1703
1704 case X86_XCPT_PF:
1705 case X86_XCPT_VE: /* Intel only */
1706 return IEMXCPTCLASS_PAGE_FAULT;
1707
1708 case X86_XCPT_DF:
1709 return IEMXCPTCLASS_DOUBLE_FAULT;
1710 }
1711 return IEMXCPTCLASS_BENIGN;
1712}
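/*
 * Illustrative sketch: how the classification above is typically consumed when
 * deciding whether a second exception escalates. The two vectors below are
 * arbitrary examples, not taken from any particular call site.
 *
 *     IEMXCPTCLASS enmPrev = iemGetXcptClass(X86_XCPT_GP);   // IEMXCPTCLASS_CONTRIBUTORY
 *     IEMXCPTCLASS enmCur  = iemGetXcptClass(X86_XCPT_NP);   // IEMXCPTCLASS_CONTRIBUTORY
 *     // A contributory exception raised while delivering another contributory one
 *     // escalates to a double fault, see IEMEvaluateRecursiveXcpt below.
 */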
1713
1714
1715/**
1716 * Evaluates how to handle an exception caused during delivery of another event
1717 * (exception / interrupt).
1718 *
1719 * @returns How to handle the recursive exception.
1720 * @param pVCpu The cross context virtual CPU structure of the
1721 * calling thread.
1722 * @param fPrevFlags The flags of the previous event.
1723 * @param uPrevVector The vector of the previous event.
1724 * @param fCurFlags The flags of the current exception.
1725 * @param uCurVector The vector of the current exception.
1726 * @param pfXcptRaiseInfo Where to store additional information about the
1727 * exception condition. Optional.
1728 */
1729VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1730 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1731{
1732 /*
1733 * Only CPU exceptions can be raised while delivering other events; software interrupt
1734 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1735 */
1736 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1737 Assert(pVCpu); RT_NOREF(pVCpu);
1738 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1739
1740 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1741 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1742 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1743 {
1744 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1745 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1746 {
1747 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1748 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1749 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1750 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1751 {
1752 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1753 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1754 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1755 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1756 uCurVector, pVCpu->cpum.GstCtx.cr2));
1757 }
1758 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1759 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1760 {
1761 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1762 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1763 }
1764 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1765 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1766 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1767 {
1768 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1769 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1770 }
1771 }
1772 else
1773 {
1774 if (uPrevVector == X86_XCPT_NMI)
1775 {
1776 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1777 if (uCurVector == X86_XCPT_PF)
1778 {
1779 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1780 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1781 }
1782 }
1783 else if ( uPrevVector == X86_XCPT_AC
1784 && uCurVector == X86_XCPT_AC)
1785 {
1786 enmRaise = IEMXCPTRAISE_CPU_HANG;
1787 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1788 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1789 }
1790 }
1791 }
1792 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1793 {
1794 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1795 if (uCurVector == X86_XCPT_PF)
1796 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1797 }
1798 else
1799 {
1800 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1801 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1802 }
1803
1804 if (pfXcptRaiseInfo)
1805 *pfXcptRaiseInfo = fRaiseInfo;
1806 return enmRaise;
1807}
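/*
 * Minimal usage sketch for the evaluator above, assuming a #GP is raised while
 * a #PF is being delivered (both are CPU exceptions):
 *
 *     IEMXCPTRAISEINFO fInfo   = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE     enmWhat = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                         &fInfo);
 *     // enmWhat == IEMXCPTRAISE_DOUBLE_FAULT, fInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT.
 */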
1808
1809
1810/**
1811 * Enters the CPU shutdown state initiated by a triple fault or other
1812 * unrecoverable conditions.
1813 *
1814 * @returns Strict VBox status code.
1815 * @param pVCpu The cross context virtual CPU structure of the
1816 * calling thread.
1817 */
1818static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1819{
1820 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1821 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1822
1823 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1824 {
1825 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1826 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1827 }
1828
1829 RT_NOREF(pVCpu);
1830 return VINF_EM_TRIPLE_FAULT;
1831}
1832
1833
1834/**
1835 * Validates a new SS segment.
1836 *
1837 * @returns VBox strict status code.
1838 * @param pVCpu The cross context virtual CPU structure of the
1839 * calling thread.
1840 * @param NewSS The new SS selector.
1841 * @param uCpl The CPL to load the stack for.
1842 * @param pDesc Where to return the descriptor.
1843 */
1844static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1845{
1846 /* Null selectors are not allowed (we're not called for dispatching
1847 interrupts with SS=0 in long mode). */
1848 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1849 {
1850 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1851 return iemRaiseTaskSwitchFault0(pVCpu);
1852 }
1853
1854 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1855 if ((NewSS & X86_SEL_RPL) != uCpl)
1856 {
1857 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1858 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1859 }
1860
1861 /*
1862 * Read the descriptor.
1863 */
1864 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1865 if (rcStrict != VINF_SUCCESS)
1866 return rcStrict;
1867
1868 /*
1869 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1870 */
1871 if (!pDesc->Legacy.Gen.u1DescType)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1878 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1879 {
1880 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1881 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1882 }
1883 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1886 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1887 }
1888
1889 /* Is it there? */
1890 /** @todo testcase: Is this checked before the canonical / limit check below? */
1891 if (!pDesc->Legacy.Gen.u1Present)
1892 {
1893 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1895 }
1896
1897 return VINF_SUCCESS;
1898}
1899
1900/** @} */
1901
1902
1903/** @name Raising Exceptions.
1904 *
1905 * @{
1906 */
1907
1908
1909/**
1910 * Loads the specified stack far pointer from the TSS.
1911 *
1912 * @returns VBox strict status code.
1913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1914 * @param uCpl The CPL to load the stack for.
1915 * @param pSelSS Where to return the new stack segment.
1916 * @param puEsp Where to return the new stack pointer.
1917 */
1918static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1919{
1920 VBOXSTRICTRC rcStrict;
1921 Assert(uCpl < 4);
1922
1923 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1924 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1925 {
1926 /*
1927 * 16-bit TSS (X86TSS16).
1928 */
1929 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1930 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1931 {
1932 uint32_t off = uCpl * 4 + 2;
1933 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1934 {
1935 /** @todo check actual access pattern here. */
1936 uint32_t u32Tmp = 0; /* gcc maybe... */
1937 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1938 if (rcStrict == VINF_SUCCESS)
1939 {
1940 *puEsp = RT_LOWORD(u32Tmp);
1941 *pSelSS = RT_HIWORD(u32Tmp);
1942 return VINF_SUCCESS;
1943 }
1944 }
1945 else
1946 {
1947 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1948 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1949 }
1950 break;
1951 }
1952
1953 /*
1954 * 32-bit TSS (X86TSS32).
1955 */
1956 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1957 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1958 {
1959 uint32_t off = uCpl * 8 + 4;
1960 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1961 {
1962 /** @todo check actual access pattern here. */
1963 uint64_t u64Tmp;
1964 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1965 if (rcStrict == VINF_SUCCESS)
1966 {
1967 *puEsp = u64Tmp & UINT32_MAX;
1968 *pSelSS = (RTSEL)(u64Tmp >> 32);
1969 return VINF_SUCCESS;
1970 }
1971 }
1972 else
1973 {
1974 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1975 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1976 }
1977 break;
1978 }
1979
1980 default:
1981 AssertFailed();
1982 rcStrict = VERR_IEM_IPE_4;
1983 break;
1984 }
1985
1986 *puEsp = 0; /* make gcc happy */
1987 *pSelSS = 0; /* make gcc happy */
1988 return rcStrict;
1989}
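/*
 * Worked offsets for the lookup above, following directly from the uCpl
 * arithmetic in the code and the X86TSS16/X86TSS32 layouts it relies on:
 *
 *     16-bit TSS:  off = uCpl * 4 + 2  ->  CPL0: 0x02, CPL1: 0x06, CPL2: 0x0a  (SP word, then SS word)
 *     32-bit TSS:  off = uCpl * 8 + 4  ->  CPL0: 0x04, CPL1: 0x0c, CPL2: 0x14  (ESP dword, then SS word)
 */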
1990
1991
1992/**
1993 * Loads the specified stack pointer from the 64-bit TSS.
1994 *
1995 * @returns VBox strict status code.
1996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1997 * @param uCpl The CPL to load the stack for.
1998 * @param uIst The interrupt stack table index, 0 to use the stack selected by uCpl.
1999 * @param puRsp Where to return the new stack pointer.
2000 */
2001static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2002{
2003 Assert(uCpl < 4);
2004 Assert(uIst < 8);
2005 *puRsp = 0; /* make gcc happy */
2006
2007 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2008 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2009
2010 uint32_t off;
2011 if (uIst)
2012 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2013 else
2014 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2015 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2016 {
2017 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2018 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2019 }
2020
2021 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2022}
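/*
 * Worked offsets for the 64-bit TSS lookup above, assuming the standard AMD64
 * TSS layout (rsp0 at offset 0x04, ist1 at offset 0x24):
 *
 *     uIst == 0:  off = uCpl * 8 + 0x04        ->  RSP0: 0x04, RSP1: 0x0c, RSP2: 0x14
 *     uIst != 0:  off = (uIst - 1) * 8 + 0x24  ->  IST1: 0x24, ..., IST7: 0x54
 */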
2023
2024
2025/**
2026 * Adjust the CPU state according to the exception being raised.
2027 *
2028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2029 * @param u8Vector The exception that has been raised.
2030 */
2031DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2032{
2033 switch (u8Vector)
2034 {
2035 case X86_XCPT_DB:
2036 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2037 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2038 break;
2039 /** @todo Read the AMD and Intel exception reference... */
2040 }
2041}
2042
2043
2044/**
2045 * Implements exceptions and interrupts for real mode.
2046 *
2047 * @returns VBox strict status code.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 * @param cbInstr The number of bytes to offset rIP by in the return
2050 * address.
2051 * @param u8Vector The interrupt / exception vector number.
2052 * @param fFlags The flags.
2053 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2054 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2055 */
2056static VBOXSTRICTRC
2057iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2058 uint8_t cbInstr,
2059 uint8_t u8Vector,
2060 uint32_t fFlags,
2061 uint16_t uErr,
2062 uint64_t uCr2) RT_NOEXCEPT
2063{
2064 NOREF(uErr); NOREF(uCr2);
2065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2066
2067 /*
2068 * Read the IDT entry.
2069 */
2070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2071 {
2072 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2074 }
2075 RTFAR16 Idte;
2076 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2077 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2078 {
2079 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2080 return rcStrict;
2081 }
2082
2083 /*
2084 * Push the stack frame.
2085 */
2086 uint16_t *pu16Frame;
2087 uint64_t uNewRsp;
2088 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2089 if (rcStrict != VINF_SUCCESS)
2090 return rcStrict;
2091
2092 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2093#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2094 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2095 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2096 fEfl |= UINT16_C(0xf000);
2097#endif
2098 pu16Frame[2] = (uint16_t)fEfl;
2099 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2100 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2101 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2102 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2103 return rcStrict;
2104
2105 /*
2106 * Load the vector address into cs:ip and make exception specific state
2107 * adjustments.
2108 */
2109 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2110 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2111 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2112 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2113 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2114 pVCpu->cpum.GstCtx.rip = Idte.off;
2115 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2116 IEMMISC_SET_EFL(pVCpu, fEfl);
2117
2118 /** @todo do we actually do this in real mode? */
2119 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2120 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2121
2122 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2123 so best leave them alone in case we're in a weird kind of real mode... */
2124
2125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2126}
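/*
 * Illustrative walk-through of the real-mode dispatch above for INT 10h (the
 * vector is an arbitrary example):
 *
 *     // 4-byte IVT entry: offset word followed by segment word.
 *     GCPtrIdte = pVCpu->cpum.GstCtx.idtr.pIdt + 4 * 0x10;
 *     // 6-byte stack frame, filled as in the code above:
 *     //   pu16Frame[2] = FLAGS, pu16Frame[1] = CS, pu16Frame[0] = return IP.
 *     // Control transfer: CS:IP = Idte.sel : Idte.off, with CS.base = Idte.sel << 4.
 */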
2127
2128
2129/**
2130 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2131 *
2132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2133 * @param pSReg Pointer to the segment register.
2134 */
2135DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2136{
2137 pSReg->Sel = 0;
2138 pSReg->ValidSel = 0;
2139 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2140 {
2141 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2142 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2143 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2144 }
2145 else
2146 {
2147 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2148 /** @todo check this on AMD-V */
2149 pSReg->u64Base = 0;
2150 pSReg->u32Limit = 0;
2151 }
2152}
2153
2154
2155/**
2156 * Loads a segment selector during a task switch in V8086 mode.
2157 *
2158 * @param pSReg Pointer to the segment register.
2159 * @param uSel The selector value to load.
2160 */
2161DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2162{
2163 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2164 pSReg->Sel = uSel;
2165 pSReg->ValidSel = uSel;
2166 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2167 pSReg->u64Base = uSel << 4;
2168 pSReg->u32Limit = 0xffff;
2169 pSReg->Attr.u = 0xf3;
2170}
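/*
 * Example of what the V8086 loader above produces for uSel = 0xb800 (an
 * arbitrary selector value):
 *
 *     u64Base  = 0xb800 << 4 = 0xb8000
 *     u32Limit = 0xffff
 *     Attr.u   = 0xf3            // present, DPL=3, accessed read/write data segment
 */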
2171
2172
2173/**
2174 * Loads a segment selector during a task switch in protected mode.
2175 *
2176 * In this task switch scenario, we would throw \#TS exceptions rather than
2177 * \#GPs.
2178 *
2179 * @returns VBox strict status code.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param pSReg Pointer to the segment register.
2182 * @param uSel The new selector value.
2183 *
2184 * @remarks This does _not_ handle CS or SS.
2185 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2186 */
2187static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2188{
2189 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2190
2191 /* Null data selector. */
2192 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2193 {
2194 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2196 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2197 return VINF_SUCCESS;
2198 }
2199
2200 /* Fetch the descriptor. */
2201 IEMSELDESC Desc;
2202 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2203 if (rcStrict != VINF_SUCCESS)
2204 {
2205 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2206 VBOXSTRICTRC_VAL(rcStrict)));
2207 return rcStrict;
2208 }
2209
2210 /* Must be a data segment or readable code segment. */
2211 if ( !Desc.Legacy.Gen.u1DescType
2212 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2213 {
2214 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2215 Desc.Legacy.Gen.u4Type));
2216 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2217 }
2218
2219 /* Check privileges for data segments and non-conforming code segments. */
2220 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2221 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2222 {
2223 /* The RPL and the new CPL must be less than or equal to the DPL. */
2224 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2225 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2228 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2229 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2230 }
2231 }
2232
2233 /* Is it there? */
2234 if (!Desc.Legacy.Gen.u1Present)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2237 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2238 }
2239
2240 /* The base and limit. */
2241 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2242 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2243
2244 /*
2245 * Ok, everything checked out fine. Now set the accessed bit before
2246 * committing the result into the registers.
2247 */
2248 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2249 {
2250 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2254 }
2255
2256 /* Commit */
2257 pSReg->Sel = uSel;
2258 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2259 pSReg->u32Limit = cbLimit;
2260 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2261 pSReg->ValidSel = uSel;
2262 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2263 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2264 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2265
2266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2268 return VINF_SUCCESS;
2269}
2270
2271
2272/**
2273 * Performs a task switch.
2274 *
2275 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2276 * caller is responsible for performing the necessary checks (like DPL, TSS
2277 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2278 * reference for JMP, CALL, IRET.
2279 *
2280 * If the task switch is due to a software interrupt or hardware exception,
2281 * the caller is responsible for validating the TSS selector and descriptor. See
2282 * Intel Instruction reference for INT n.
2283 *
2284 * @returns VBox strict status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param enmTaskSwitch The cause of the task switch.
2287 * @param uNextEip The EIP effective after the task switch.
2288 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2289 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2290 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2291 * @param SelTSS The TSS selector of the new task.
2292 * @param pNewDescTSS Pointer to the new TSS descriptor.
2293 */
2294VBOXSTRICTRC
2295iemTaskSwitch(PVMCPUCC pVCpu,
2296 IEMTASKSWITCH enmTaskSwitch,
2297 uint32_t uNextEip,
2298 uint32_t fFlags,
2299 uint16_t uErr,
2300 uint64_t uCr2,
2301 RTSEL SelTSS,
2302 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2303{
2304 Assert(!IEM_IS_REAL_MODE(pVCpu));
2305 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2306 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2307
2308 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2309 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2310 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2311 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2312 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2313
2314 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2315 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2316
2317 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2318 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2319
2320 /* Update CR2 in case it's a page-fault. */
2321 /** @todo This should probably be done much earlier in IEM/PGM. See
2322 * @bugref{5653#c49}. */
2323 if (fFlags & IEM_XCPT_FLAGS_CR2)
2324 pVCpu->cpum.GstCtx.cr2 = uCr2;
2325
2326 /*
2327 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2328 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2329 */
2330 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2331 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2332 if (uNewTSSLimit < uNewTSSLimitMin)
2333 {
2334 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2335 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2336 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2337 }
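/*
 * For reference, the architectural minima behind the check above (matching the
 * X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN / X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN
 * constants): a 32-bit TSS must cover at least 0x68 bytes (limit >= 0x67), a
 * 16-bit TSS at least 0x2c bytes (limit >= 0x2b). E.g. a 32-bit TSS with
 * uNewTSSLimit = 0x2b fails here since 0x2b < 0x67.
 */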
2338
2339 /*
2340 * Task switches in VMX non-root mode unconditionally cause a task-switch VM-exit.
2341 * The new TSS must have been read and validated (DPL, limits etc.) before a
2342 * task-switch VM-exit commences.
2343 *
2344 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2345 */
2346 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2347 {
2348 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2349 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2350 }
2351
2352 /*
2353 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2354 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2355 */
2356 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2357 {
2358 uint32_t const uExitInfo1 = SelTSS;
2359 uint32_t uExitInfo2 = uErr;
2360 switch (enmTaskSwitch)
2361 {
2362 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2363 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2364 default: break;
2365 }
2366 if (fFlags & IEM_XCPT_FLAGS_ERR)
2367 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2368 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2369 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2370
2371 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2372 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2373 RT_NOREF2(uExitInfo1, uExitInfo2);
2374 }
2375
2376 /*
2377 * Check the current TSS limit. The last written byte to the current TSS during the
2378 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2379 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2380 *
2381 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2382 * end up with smaller than "legal" TSS limits.
2383 */
2384 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2385 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2386 if (uCurTSSLimit < uCurTSSLimitMin)
2387 {
2388 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2389 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2391 }
2392
2393 /*
2394 * Verify that the new TSS can be accessed and map it. Map only the required contents
2395 * and not the entire TSS.
2396 */
2397 void *pvNewTSS;
2398 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2399 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2400 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2401 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2402 * not perform correct translation if this happens. See Intel spec. 7.2.1
2403 * "Task-State Segment". */
2404 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2405 if (rcStrict != VINF_SUCCESS)
2406 {
2407 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2408 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2409 return rcStrict;
2410 }
2411
2412 /*
2413 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2414 */
2415 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2416 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2417 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2418 {
2419 PX86DESC pDescCurTSS;
2420 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2421 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2422 if (rcStrict != VINF_SUCCESS)
2423 {
2424 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2425 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2426 return rcStrict;
2427 }
2428
2429 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2430 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2431 if (rcStrict != VINF_SUCCESS)
2432 {
2433 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2434 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2435 return rcStrict;
2436 }
2437
2438 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2439 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2442 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2443 fEFlags &= ~X86_EFL_NT;
2444 }
2445 }
2446
2447 /*
2448 * Save the CPU state into the current TSS.
2449 */
2450 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2451 if (GCPtrNewTSS == GCPtrCurTSS)
2452 {
2453 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2454 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2455 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2456 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2457 pVCpu->cpum.GstCtx.ldtr.Sel));
2458 }
2459 if (fIsNewTSS386)
2460 {
2461 /*
2462 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2463 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2464 */
2465 void *pvCurTSS32;
2466 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2467 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2468 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2469 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2470 if (rcStrict != VINF_SUCCESS)
2471 {
2472 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2473 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2474 return rcStrict;
2475 }
2476
2477 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2478 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2479 pCurTSS32->eip = uNextEip;
2480 pCurTSS32->eflags = fEFlags;
2481 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2482 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2483 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2484 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2485 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2486 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2487 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2488 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2489 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2490 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2491 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2492 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2493 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2494 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2495
2496 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2497 if (rcStrict != VINF_SUCCESS)
2498 {
2499 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2500 VBOXSTRICTRC_VAL(rcStrict)));
2501 return rcStrict;
2502 }
2503 }
2504 else
2505 {
2506 /*
2507 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2508 */
2509 void *pvCurTSS16;
2510 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2511 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2512 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2513 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2514 if (rcStrict != VINF_SUCCESS)
2515 {
2516 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2517 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2518 return rcStrict;
2519 }
2520
2521 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2522 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2523 pCurTSS16->ip = uNextEip;
2524 pCurTSS16->flags = (uint16_t)fEFlags;
2525 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2526 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2527 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2528 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2529 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2530 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2531 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2532 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2533 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2534 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2535 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2536 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2537
2538 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2539 if (rcStrict != VINF_SUCCESS)
2540 {
2541 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2542 VBOXSTRICTRC_VAL(rcStrict)));
2543 return rcStrict;
2544 }
2545 }
2546
2547 /*
2548 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2549 */
2550 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2551 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2552 {
2553 /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2554 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2555 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2556 }
2557
2558 /*
2559 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2560 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2561 */
2562 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2563 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2564 bool fNewDebugTrap;
2565 if (fIsNewTSS386)
2566 {
2567 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2568 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2569 uNewEip = pNewTSS32->eip;
2570 uNewEflags = pNewTSS32->eflags;
2571 uNewEax = pNewTSS32->eax;
2572 uNewEcx = pNewTSS32->ecx;
2573 uNewEdx = pNewTSS32->edx;
2574 uNewEbx = pNewTSS32->ebx;
2575 uNewEsp = pNewTSS32->esp;
2576 uNewEbp = pNewTSS32->ebp;
2577 uNewEsi = pNewTSS32->esi;
2578 uNewEdi = pNewTSS32->edi;
2579 uNewES = pNewTSS32->es;
2580 uNewCS = pNewTSS32->cs;
2581 uNewSS = pNewTSS32->ss;
2582 uNewDS = pNewTSS32->ds;
2583 uNewFS = pNewTSS32->fs;
2584 uNewGS = pNewTSS32->gs;
2585 uNewLdt = pNewTSS32->selLdt;
2586 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2587 }
2588 else
2589 {
2590 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2591 uNewCr3 = 0;
2592 uNewEip = pNewTSS16->ip;
2593 uNewEflags = pNewTSS16->flags;
2594 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2595 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2596 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2597 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2598 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2599 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2600 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2601 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2602 uNewES = pNewTSS16->es;
2603 uNewCS = pNewTSS16->cs;
2604 uNewSS = pNewTSS16->ss;
2605 uNewDS = pNewTSS16->ds;
2606 uNewFS = 0;
2607 uNewGS = 0;
2608 uNewLdt = pNewTSS16->selLdt;
2609 fNewDebugTrap = false;
2610 }
2611
2612 if (GCPtrNewTSS == GCPtrCurTSS)
2613 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2614 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2615
2616 /*
2617 * We're done accessing the new TSS.
2618 */
2619 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2620 if (rcStrict != VINF_SUCCESS)
2621 {
2622 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2623 return rcStrict;
2624 }
2625
2626 /*
2627 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2628 */
2629 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2630 {
2631 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2632 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2636 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* Check that the descriptor indicates the new TSS is available (not busy). */
2641 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2642 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2643 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2644
2645 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2646 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2647 if (rcStrict != VINF_SUCCESS)
2648 {
2649 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2650 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2651 return rcStrict;
2652 }
2653 }
2654
2655 /*
2656 * From this point on, we're technically in the new task. We will defer exceptions
2657 * until the completion of the task switch but before executing any instructions in the new task.
2658 */
2659 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2660 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2661 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2662 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2663 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2664 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2665 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2666
2667 /* Set the busy bit in TR. */
2668 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2669
2670 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2671 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2672 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2673 {
2674 uNewEflags |= X86_EFL_NT;
2675 }
2676
2677 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2678 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2679 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2680
2681 pVCpu->cpum.GstCtx.eip = uNewEip;
2682 pVCpu->cpum.GstCtx.eax = uNewEax;
2683 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2684 pVCpu->cpum.GstCtx.edx = uNewEdx;
2685 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2686 pVCpu->cpum.GstCtx.esp = uNewEsp;
2687 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2688 pVCpu->cpum.GstCtx.esi = uNewEsi;
2689 pVCpu->cpum.GstCtx.edi = uNewEdi;
2690
2691 uNewEflags &= X86_EFL_LIVE_MASK;
2692 uNewEflags |= X86_EFL_RA1_MASK;
2693 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2694
2695 /*
2696 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2697 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2698 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2699 */
2700 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2701 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2704 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2705
2706 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2707 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2708
2709 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2710 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2711
2712 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2713 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2714
2715 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2716 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2717 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2718
2719 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2720 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2721 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2722 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2723
2724 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2725 {
2726 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2727 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2728 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2729 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2730 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2731 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2732 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2733 }
2734
2735 /*
2736 * Switch CR3 for the new task.
2737 */
2738 if ( fIsNewTSS386
2739 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2740 {
2741 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2742 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2743 AssertRCSuccessReturn(rc, rc);
2744
2745 /* Inform PGM. */
2746 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2747 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2748 AssertRCReturn(rc, rc);
2749 /* ignore informational status codes */
2750
2751 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2752 }
2753
2754 /*
2755 * Switch LDTR for the new task.
2756 */
2757 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2758 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2759 else
2760 {
2761 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2762
2763 IEMSELDESC DescNewLdt;
2764 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2765 if (rcStrict != VINF_SUCCESS)
2766 {
2767 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2768 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2769 return rcStrict;
2770 }
2771 if ( !DescNewLdt.Legacy.Gen.u1Present
2772 || DescNewLdt.Legacy.Gen.u1DescType
2773 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2774 {
2775 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2776 uNewLdt, DescNewLdt.Legacy.u));
2777 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2781 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2782 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2783 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2784 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2786 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2787 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2788 }
2789
2790 IEMSELDESC DescSS;
2791 if (IEM_IS_V86_MODE(pVCpu))
2792 {
2793 IEM_SET_CPL(pVCpu, 3);
2794 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2795 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2796 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2797 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2798 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2799 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2800
2801 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2802 DescSS.Legacy.u = 0;
2803 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2804 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2805 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2806 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2807 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2808 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2809 DescSS.Legacy.Gen.u2Dpl = 3;
2810 }
2811 else
2812 {
2813 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2814
2815 /*
2816 * Load the stack segment for the new task.
2817 */
2818 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2819 {
2820 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2821 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2822 }
2823
2824 /* Fetch the descriptor. */
2825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2826 if (rcStrict != VINF_SUCCESS)
2827 {
2828 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2829 VBOXSTRICTRC_VAL(rcStrict)));
2830 return rcStrict;
2831 }
2832
2833 /* SS must be a data segment and writable. */
2834 if ( !DescSS.Legacy.Gen.u1DescType
2835 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2836 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2837 {
2838 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2839 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2841 }
2842
2843 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2844 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2845 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2846 {
2847 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2848 uNewCpl));
2849 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2850 }
2851
2852 /* Is it there? */
2853 if (!DescSS.Legacy.Gen.u1Present)
2854 {
2855 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2856 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2857 }
2858
2859 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2860 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2861
2862 /* Set the accessed bit before committing the result into SS. */
2863 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2864 {
2865 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2866 if (rcStrict != VINF_SUCCESS)
2867 return rcStrict;
2868 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2869 }
2870
2871 /* Commit SS. */
2872 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2873 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2874 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2875 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2876 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2877 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2878 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2879
2880 /* CPL has changed, update IEM before loading rest of segments. */
2881 IEM_SET_CPL(pVCpu, uNewCpl);
2882
2883 /*
2884 * Load the data segments for the new task.
2885 */
2886 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2887 if (rcStrict != VINF_SUCCESS)
2888 return rcStrict;
2889 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2890 if (rcStrict != VINF_SUCCESS)
2891 return rcStrict;
2892 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2893 if (rcStrict != VINF_SUCCESS)
2894 return rcStrict;
2895 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2896 if (rcStrict != VINF_SUCCESS)
2897 return rcStrict;
2898
2899 /*
2900 * Load the code segment for the new task.
2901 */
2902 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2903 {
2904 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2905 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2906 }
2907
2908 /* Fetch the descriptor. */
2909 IEMSELDESC DescCS;
2910 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2911 if (rcStrict != VINF_SUCCESS)
2912 {
2913 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2914 return rcStrict;
2915 }
2916
2917 /* CS must be a code segment. */
2918 if ( !DescCS.Legacy.Gen.u1DescType
2919 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2920 {
2921 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2922 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2923 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2924 }
2925
2926 /* For conforming CS, DPL must be less than or equal to the RPL. */
2927 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2928 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2929 {
2930 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2931 DescCS.Legacy.Gen.u2Dpl));
2932 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2933 }
2934
2935 /* For non-conforming CS, DPL must match RPL. */
2936 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2937 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2938 {
2939 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2940 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2942 }
2943
2944 /* Is it there? */
2945 if (!DescCS.Legacy.Gen.u1Present)
2946 {
2947 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2948 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2949 }
2950
2951 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2952 u64Base = X86DESC_BASE(&DescCS.Legacy);
2953
2954 /* Set the accessed bit before committing the result into CS. */
2955 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2956 {
2957 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2958 if (rcStrict != VINF_SUCCESS)
2959 return rcStrict;
2960 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2961 }
2962
2963 /* Commit CS. */
2964 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2965 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2966 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2967 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2968 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2969 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2971 }
2972
2973 /* Make sure the CPU mode is correct. */
2974 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2975 if (fExecNew != pVCpu->iem.s.fExec)
2976 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2977 pVCpu->iem.s.fExec = fExecNew;
2978
2979 /** @todo Debug trap. */
2980 if (fIsNewTSS386 && fNewDebugTrap)
2981 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2982
2983 /*
2984 * Construct the error code masks based on what caused this task switch.
2985 * See Intel Instruction reference for INT.
2986 */
2987 uint16_t uExt;
2988 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2989 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2990 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2991 uExt = 1;
2992 else
2993 uExt = 0;
2994
2995 /*
2996 * Push any error code on to the new stack.
2997 */
2998 if (fFlags & IEM_XCPT_FLAGS_ERR)
2999 {
3000 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3001 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3002 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3003
3004 /* Check that there is sufficient space on the stack. */
3005 /** @todo Factor out segment limit checking for normal/expand down segments
3006 * into a separate function. */
3007 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3008 {
3009 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3010 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3011 {
3012 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3013 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3014 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3015 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3016 }
3017 }
3018 else
3019 {
3020 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3021 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3022 {
3023 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3024 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3025 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3026 }
3027 }
3028
3029
3030 if (fIsNewTSS386)
3031 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3032 else
3033 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3034 if (rcStrict != VINF_SUCCESS)
3035 {
3036 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3037 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3038 return rcStrict;
3039 }
3040 }
3041
3042 /* Check the new EIP against the new CS limit. */
3043 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3044 {
3045 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3046 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3047 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3048 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3049 }
3050
3051 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3052 pVCpu->cpum.GstCtx.ss.Sel));
3053 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3054}
3055
3056
3057/**
3058 * Implements exceptions and interrupts for protected mode.
3059 *
3060 * @returns VBox strict status code.
3061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3062 * @param cbInstr The number of bytes to offset rIP by in the return
3063 * address.
3064 * @param u8Vector The interrupt / exception vector number.
3065 * @param fFlags The flags.
3066 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3067 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3068 */
3069static VBOXSTRICTRC
3070iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3071 uint8_t cbInstr,
3072 uint8_t u8Vector,
3073 uint32_t fFlags,
3074 uint16_t uErr,
3075 uint64_t uCr2) RT_NOEXCEPT
3076{
3077 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3078
3079 /*
3080 * Read the IDT entry.
3081 */
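 /* Each protected-mode IDT entry is 8 bytes and the IDT limit is the offset of
    the last valid byte, hence the '8 * vector + 7' bound below. */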
3082 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3083 {
3084 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3085 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3086 }
3087 X86DESC Idte;
3088 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3089 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3091 {
3092 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3093 return rcStrict;
3094 }
3095 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3096 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3097 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3098 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3099
3100 /*
3101 * Check the descriptor type, DPL and such.
3102 * ASSUMES this is done in the same order as described for call-gate calls.
3103 */
3104 if (Idte.Gate.u1DescType)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 bool fTaskGate = false;
3110 uint8_t f32BitGate = true;
3111 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
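 /* TF, NT, RF and VM are always cleared when going through a gate; interrupt
    gates additionally clear IF (added in the switch below). */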
3112 switch (Idte.Gate.u4Type)
3113 {
3114 case X86_SEL_TYPE_SYS_UNDEFINED:
3115 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3116 case X86_SEL_TYPE_SYS_LDT:
3117 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3118 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3119 case X86_SEL_TYPE_SYS_UNDEFINED2:
3120 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3121 case X86_SEL_TYPE_SYS_UNDEFINED3:
3122 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3123 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3124 case X86_SEL_TYPE_SYS_UNDEFINED4:
3125 {
3126 /** @todo check what actually happens when the type is wrong...
3127 * esp. call gates. */
3128 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131
3132 case X86_SEL_TYPE_SYS_286_INT_GATE:
3133 f32BitGate = false;
3134 RT_FALL_THRU();
3135 case X86_SEL_TYPE_SYS_386_INT_GATE:
3136 fEflToClear |= X86_EFL_IF;
3137 break;
3138
3139 case X86_SEL_TYPE_SYS_TASK_GATE:
3140 fTaskGate = true;
3141#ifndef IEM_IMPLEMENTS_TASKSWITCH
3142 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3143#endif
3144 break;
3145
3146 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3147 f32BitGate = false; RT_FALL_THRU();
3148 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3149 break;
3150
3151 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3152 }
3153
3154 /* Check DPL against CPL if applicable. */
3155 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3156 {
3157 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3158 {
3159 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162 }
3163
3164 /* Is it there? */
3165 if (!Idte.Gate.u1Present)
3166 {
3167 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3168 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3169 }
3170
3171 /* Is it a task-gate? */
3172 if (fTaskGate)
3173 {
3174 /*
3175 * Construct the error code masks based on what caused this task switch.
3176 * See Intel Instruction reference for INT.
3177 */
3178 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3179 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3180 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3181 RTSEL SelTSS = Idte.Gate.u16Sel;
3182
3183 /*
3184 * Fetch the TSS descriptor in the GDT.
3185 */
3186 IEMSELDESC DescTSS;
3187 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3188 if (rcStrict != VINF_SUCCESS)
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3191 VBOXSTRICTRC_VAL(rcStrict)));
3192 return rcStrict;
3193 }
3194
3195 /* The TSS descriptor must be a system segment and be available (not busy). */
3196 if ( DescTSS.Legacy.Gen.u1DescType
3197 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3198 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3199 {
3200 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3201 u8Vector, SelTSS, DescTSS.Legacy.au64));
3202 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3203 }
3204
3205 /* The TSS must be present. */
3206 if (!DescTSS.Legacy.Gen.u1Present)
3207 {
3208 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3209 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3210 }
3211
3212 /* Do the actual task switch. */
3213 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3214 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3215 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3216 }
3217
3218 /* A null CS is bad. */
3219 RTSEL NewCS = Idte.Gate.u16Sel;
3220 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3223 return iemRaiseGeneralProtectionFault0(pVCpu);
3224 }
3225
3226 /* Fetch the descriptor for the new CS. */
3227 IEMSELDESC DescCS;
3228 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3229 if (rcStrict != VINF_SUCCESS)
3230 {
3231 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3232 return rcStrict;
3233 }
3234
3235 /* Must be a code segment. */
3236 if (!DescCS.Legacy.Gen.u1DescType)
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3242 {
3243 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3244 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3245 }
3246
3247 /* Don't allow lowering the privilege level. */
3248 /** @todo Does the lowering of privileges apply to software interrupts
3249 * only? This has bearings on the more-privileged or
3250 * same-privilege stack behavior further down. A testcase would
3251 * be nice. */
3252 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3253 {
3254 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3255 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3257 }
3258
3259 /* Make sure the selector is present. */
3260 if (!DescCS.Legacy.Gen.u1Present)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3263 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3264 }
3265
3266#ifdef LOG_ENABLED
3267 /* If software interrupt, try decode it if logging is enabled and such. */
3268 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3269 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3270 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3271#endif
3272
3273 /* Check the new EIP against the new CS limit. */
3274 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3275 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3276 ? Idte.Gate.u16OffsetLow
3277 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
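 /* A 286 gate only carries a 16-bit offset; a 386 gate combines the low and
    high offset words into a full 32-bit EIP. */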
3278 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3279 if (uNewEip > cbLimitCS)
3280 {
3281 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3282 u8Vector, uNewEip, cbLimitCS, NewCS));
3283 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3284 }
3285 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3286
3287 /* Calc the flag image to push. */
3288 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3289 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3290 fEfl &= ~X86_EFL_RF;
3291 else
3292 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3293
3294 /* From V8086 mode only go to CPL 0. */
3295 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3296 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
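 /* Conforming code segments keep the current CPL; non-conforming ones run the
    handler at their DPL. */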
3297 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3298 {
3299 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3300 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3301 }
3302
3303 /*
3304 * If the privilege level changes, we need to get a new stack from the TSS.
3305 * This in turns means validating the new SS and ESP...
3306 */
3307 if (uNewCpl != IEM_GET_CPL(pVCpu))
3308 {
3309 RTSEL NewSS;
3310 uint32_t uNewEsp;
3311 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3312 if (rcStrict != VINF_SUCCESS)
3313 return rcStrict;
3314
3315 IEMSELDESC DescSS;
3316 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3317 if (rcStrict != VINF_SUCCESS)
3318 return rcStrict;
3319 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3320 if (!DescSS.Legacy.Gen.u1DefBig)
3321 {
3322 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3323 uNewEsp = (uint16_t)uNewEsp;
3324 }
3325
3326 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3327
3328 /* Check that there is sufficient space for the stack frame. */
3329 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3330 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3331 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3332 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
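 /* The frame is {(E)IP, CS, (E)FLAGS, (E)SP, SS}, extended with {ES, DS, FS, GS}
    when interrupting V8086 code, plus an optional error code; each slot is a word
    for a 16-bit gate and a dword for a 32-bit one (hence the shift by f32BitGate). */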
3333
3334 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3335 {
3336 if ( uNewEsp - 1 > cbLimitSS
3337 || uNewEsp < cbStackFrame)
3338 {
3339 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3340 u8Vector, NewSS, uNewEsp, cbStackFrame));
3341 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3342 }
3343 }
3344 else
3345 {
3346 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3347 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3348 {
3349 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3350 u8Vector, NewSS, uNewEsp, cbStackFrame));
3351 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3352 }
3353 }
3354
3355 /*
3356 * Start making changes.
3357 */
3358
3359 /* Set the new CPL so that stack accesses use it. */
3360 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3361 IEM_SET_CPL(pVCpu, uNewCpl);
3362
3363 /* Create the stack frame. */
3364 RTPTRUNION uStackFrame;
3365 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3366 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3367 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3368 if (rcStrict != VINF_SUCCESS)
3369 return rcStrict;
3370 void * const pvStackFrame = uStackFrame.pv;
3371 if (f32BitGate)
3372 {
3373 if (fFlags & IEM_XCPT_FLAGS_ERR)
3374 *uStackFrame.pu32++ = uErr;
3375 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3376 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3377 uStackFrame.pu32[2] = fEfl;
3378 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3379 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3380 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3381 if (fEfl & X86_EFL_VM)
3382 {
3383 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3384 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3385 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3386 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3387 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3388 }
3389 }
3390 else
3391 {
3392 if (fFlags & IEM_XCPT_FLAGS_ERR)
3393 *uStackFrame.pu16++ = uErr;
3394 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3395 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3396 uStackFrame.pu16[2] = fEfl;
3397 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3398 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3399 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3400 if (fEfl & X86_EFL_VM)
3401 {
3402 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3403 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3404 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3405 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3406 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3407 }
3408 }
3409 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3410 if (rcStrict != VINF_SUCCESS)
3411 return rcStrict;
3412
3413 /* Mark the selectors 'accessed' (hope this is the correct time). */
3414 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3415 * after pushing the stack frame? (Write protect the gdt + stack to
3416 * find out.) */
3417 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3418 {
3419 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
3422 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3423 }
3424
3425 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3426 {
3427 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3431 }
3432
3433 /*
3434 * Start committing the register changes (joins with the DPL=CPL branch).
3435 */
3436 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3437 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3438 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3439 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3440 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3441 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3442 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3443 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3444 * SP is loaded).
3445 * Need to check the other combinations too:
3446 * - 16-bit TSS, 32-bit handler
3447 * - 32-bit TSS, 16-bit handler */
3448 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3449 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3450 else
3451 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3452
3453 if (fEfl & X86_EFL_VM)
3454 {
3455 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3456 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3457 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3458 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3459 }
3460 }
3461 /*
3462 * Same privilege, no stack change and smaller stack frame.
3463 */
3464 else
3465 {
3466 uint64_t uNewRsp;
3467 RTPTRUNION uStackFrame;
3468 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
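 /* The same-privilege frame is just {(E)IP, CS, (E)FLAGS} plus an optional error
    code: 6 or 8 bytes with a 16-bit gate, 12 or 16 bytes with a 32-bit one. */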
3469 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3470 if (rcStrict != VINF_SUCCESS)
3471 return rcStrict;
3472 void * const pvStackFrame = uStackFrame.pv;
3473
3474 if (f32BitGate)
3475 {
3476 if (fFlags & IEM_XCPT_FLAGS_ERR)
3477 *uStackFrame.pu32++ = uErr;
3478 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3479 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3480 uStackFrame.pu32[2] = fEfl;
3481 }
3482 else
3483 {
3484 if (fFlags & IEM_XCPT_FLAGS_ERR)
3485 *uStackFrame.pu16++ = uErr;
3486 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3487 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3488 uStackFrame.pu16[2] = fEfl;
3489 }
3490 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493
3494 /* Mark the CS selector as 'accessed'. */
3495 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3496 {
3497 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3498 if (rcStrict != VINF_SUCCESS)
3499 return rcStrict;
3500 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3501 }
3502
3503 /*
3504 * Start committing the register changes (joins with the other branch).
3505 */
3506 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3507 }
3508
3509 /* ... register committing continues. */
3510 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3511 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3512 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3513 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3514 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3515 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3516
3517 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3518 fEfl &= ~fEflToClear;
3519 IEMMISC_SET_EFL(pVCpu, fEfl);
3520
3521 if (fFlags & IEM_XCPT_FLAGS_CR2)
3522 pVCpu->cpum.GstCtx.cr2 = uCr2;
3523
3524 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3525 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3526
3527 /* Make sure the execution flags are correct. */
3528 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3529 if (fExecNew != pVCpu->iem.s.fExec)
3530 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3531 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3532 pVCpu->iem.s.fExec = fExecNew;
3533 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3534
3535 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3536}
3537
3538
3539/**
3540 * Implements exceptions and interrupts for long mode.
3541 *
3542 * @returns VBox strict status code.
3543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3544 * @param cbInstr The number of bytes to offset rIP by in the return
3545 * address.
3546 * @param u8Vector The interrupt / exception vector number.
3547 * @param fFlags The flags.
3548 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3549 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3550 */
3551static VBOXSTRICTRC
3552iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3553 uint8_t cbInstr,
3554 uint8_t u8Vector,
3555 uint32_t fFlags,
3556 uint16_t uErr,
3557 uint64_t uCr2) RT_NOEXCEPT
3558{
3559 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3560
3561 /*
3562 * Read the IDT entry.
3563 */
3564 uint16_t offIdt = (uint16_t)u8Vector << 4;
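 /* Long-mode IDT entries are 16 bytes each, hence the shift by 4 and the two
    8-byte fetches below. */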
3565 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3566 {
3567 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3568 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3569 }
3570 X86DESC64 Idte;
3571#ifdef _MSC_VER /* Shut up silly compiler warning. */
3572 Idte.au64[0] = 0;
3573 Idte.au64[1] = 0;
3574#endif
3575 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3576 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3577 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3578 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3579 {
3580 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3581 return rcStrict;
3582 }
3583 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3584 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3585 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3586
3587 /*
3588 * Check the descriptor type, DPL and such.
3589 * ASSUMES this is done in the same order as described for call-gate calls.
3590 */
3591 if (Idte.Gate.u1DescType)
3592 {
3593 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3594 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3595 }
3596 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3597 switch (Idte.Gate.u4Type)
3598 {
3599 case AMD64_SEL_TYPE_SYS_INT_GATE:
3600 fEflToClear |= X86_EFL_IF;
3601 break;
3602 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3603 break;
3604
3605 default:
3606 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3607 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3608 }
3609
3610 /* Check DPL against CPL if applicable. */
3611 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3612 {
3613 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 }
3619
3620 /* Is it there? */
3621 if (!Idte.Gate.u1Present)
3622 {
3623 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3624 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3625 }
3626
3627 /* A null CS is bad. */
3628 RTSEL NewCS = Idte.Gate.u16Sel;
3629 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3630 {
3631 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3632 return iemRaiseGeneralProtectionFault0(pVCpu);
3633 }
3634
3635 /* Fetch the descriptor for the new CS. */
3636 IEMSELDESC DescCS;
3637 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3638 if (rcStrict != VINF_SUCCESS)
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3641 return rcStrict;
3642 }
3643
3644 /* Must be a 64-bit code segment. */
3645 if (!DescCS.Long.Gen.u1DescType)
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3648 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3649 }
3650 if ( !DescCS.Long.Gen.u1Long
3651 || DescCS.Long.Gen.u1DefBig
3652 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3653 {
3654 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3655 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3656 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3657 }
3658
3659 /* Don't allow lowering the privilege level. For non-conforming CS
3660 selectors, the CS.DPL sets the privilege level the trap/interrupt
3661 handler runs at. For conforming CS selectors, the CPL remains
3662 unchanged, but the CS.DPL must be <= CPL. */
3663 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3664 * when CPU in Ring-0. Result \#GP? */
3665 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3666 {
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3668 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3669 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3670 }
3671
3672
3673 /* Make sure the selector is present. */
3674 if (!DescCS.Legacy.Gen.u1Present)
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3677 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3678 }
3679
3680 /* Check that the new RIP is canonical. */
3681 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3682 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3683 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3684 if (!IEM_IS_CANONICAL(uNewRip))
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3687 return iemRaiseGeneralProtectionFault0(pVCpu);
3688 }
3689
3690 /*
3691 * If the privilege level changes or if the IST isn't zero, we need to get
3692 * a new stack from the TSS.
3693 */
3694 uint64_t uNewRsp;
3695 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3696 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3697 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3698 || Idte.Gate.u3IST != 0)
3699 {
3700 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3701 if (rcStrict != VINF_SUCCESS)
3702 return rcStrict;
3703 }
3704 else
3705 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3706 uNewRsp &= ~(uint64_t)0xf;
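 /* The CPU aligns the new stack on a 16 byte boundary before pushing the 64-bit
    interrupt frame. */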
3707
3708 /*
3709 * Calc the flag image to push.
3710 */
3711 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3712 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3713 fEfl &= ~X86_EFL_RF;
3714 else
3715 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3716
3717 /*
3718 * Start making changes.
3719 */
3720 /* Set the new CPL so that stack accesses use it. */
3721 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3722 IEM_SET_CPL(pVCpu, uNewCpl);
3723/** @todo Setting CPL this early seems wrong as it would affect any errors we
3724 * raise while accessing the stack and (?) the GDT/LDT... */
3725
3726 /* Create the stack frame. */
3727 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
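 /* The 64-bit frame is always {RIP, CS, RFLAGS, RSP, SS} (5 qwords), plus one
    extra qword when an error code is pushed. */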
3728 RTPTRUNION uStackFrame;
3729 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3730 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3731 if (rcStrict != VINF_SUCCESS)
3732 return rcStrict;
3733 void * const pvStackFrame = uStackFrame.pv;
3734
3735 if (fFlags & IEM_XCPT_FLAGS_ERR)
3736 *uStackFrame.pu64++ = uErr;
3737 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3738 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3739 uStackFrame.pu64[2] = fEfl;
3740 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3741 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3742 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3743 if (rcStrict != VINF_SUCCESS)
3744 return rcStrict;
3745
3746 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3747 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3748 * after pushing the stack frame? (Write protect the gdt + stack to
3749 * find out.) */
3750 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3751 {
3752 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3756 }
3757
3758 /*
3759 * Start committing the register changes.
3760 */
3761 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3762 * hidden registers when interrupting 32-bit or 16-bit code! */
3763 if (uNewCpl != uOldCpl)
3764 {
3765 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3766 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3767 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3768 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3769 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3770 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
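 /* I.e. a NULL SS (with RPL = new CPL) is loaded on a 64-bit privilege change;
    the actual stack pointer comes from the TSS RSPn/IST slot fetched above. */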
3771 }
3772 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3773 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3774 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3775 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3776 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3777 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3778 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3779 pVCpu->cpum.GstCtx.rip = uNewRip;
3780
3781 fEfl &= ~fEflToClear;
3782 IEMMISC_SET_EFL(pVCpu, fEfl);
3783
3784 if (fFlags & IEM_XCPT_FLAGS_CR2)
3785 pVCpu->cpum.GstCtx.cr2 = uCr2;
3786
3787 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3788 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3789
3790 iemRecalcExecModeAndCplFlags(pVCpu);
3791
3792 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3793}
3794
3795
3796/**
3797 * Implements exceptions and interrupts.
3798 *
3799 * All exceptions and interrupts go thru this function!
3800 *
3801 * @returns VBox strict status code.
3802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3803 * @param cbInstr The number of bytes to offset rIP by in the return
3804 * address.
3805 * @param u8Vector The interrupt / exception vector number.
3806 * @param fFlags The flags.
3807 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3808 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3809 */
3810VBOXSTRICTRC
3811iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3812 uint8_t cbInstr,
3813 uint8_t u8Vector,
3814 uint32_t fFlags,
3815 uint16_t uErr,
3816 uint64_t uCr2) RT_NOEXCEPT
3817{
3818 /*
3819 * Get all the state that we might need here.
3820 */
3821 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3822 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3823
3824#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3825 /*
3826 * Flush prefetch buffer
3827 */
3828 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3829#endif
3830
3831 /*
3832 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3833 */
3834 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3835 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3836 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3837 | IEM_XCPT_FLAGS_BP_INSTR
3838 | IEM_XCPT_FLAGS_ICEBP_INSTR
3839 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3840 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3841 {
3842 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3843 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3844 u8Vector = X86_XCPT_GP;
3845 uErr = 0;
3846 }
3847#ifdef DBGFTRACE_ENABLED
3848 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3849 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3850 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3851#endif
3852
3853 /*
3854 * Evaluate whether NMI blocking should be in effect.
3855 * Normally, NMI blocking is in effect whenever we inject an NMI.
3856 */
3857 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3858 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3859
3860#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3861 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3862 {
3863 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3864 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3865 return rcStrict0;
3866
3867 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3868 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3869 {
3870 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3871 fBlockNmi = false;
3872 }
3873 }
3874#endif
3875
3876#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3877 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3878 {
3879 /*
3880 * If the event is being injected as part of VMRUN, it isn't subject to event
3881 * intercepts in the nested-guest. However, secondary exceptions that occur
3882 * during injection of any event -are- subject to exception intercepts.
3883 *
3884 * See AMD spec. 15.20 "Event Injection".
3885 */
3886 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3887 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3888 else
3889 {
3890 /*
3891 * Check and handle if the event being raised is intercepted.
3892 */
3893 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3894 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3895 return rcStrict0;
3896 }
3897 }
3898#endif
3899
3900 /*
3901 * Set NMI blocking if necessary.
3902 */
3903 if (fBlockNmi)
3904 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3905
3906 /*
3907 * Do recursion accounting.
3908 */
3909 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3910 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3911 if (pVCpu->iem.s.cXcptRecursions == 0)
3912 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3913 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3914 else
3915 {
3916 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3917 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3918 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3919
3920 if (pVCpu->iem.s.cXcptRecursions >= 4)
3921 {
3922#ifdef DEBUG_bird
3923 AssertFailed();
3924#endif
3925 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3926 }
3927
3928 /*
3929 * Evaluate the sequence of recurring events.
3930 */
3931 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3932 NULL /* pXcptRaiseInfo */);
3933 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3934 { /* likely */ }
3935 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3936 {
3937 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3938 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3939 u8Vector = X86_XCPT_DF;
3940 uErr = 0;
3941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3942 /* VMX nested-guest #DF intercept needs to be checked here. */
3943 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3944 {
3945 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3946 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3947 return rcStrict0;
3948 }
3949#endif
3950 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3951 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3952 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3953 }
3954 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3955 {
3956 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3957 return iemInitiateCpuShutdown(pVCpu);
3958 }
3959 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3960 {
3961 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3962 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3963 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3964 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3965 return VERR_EM_GUEST_CPU_HANG;
3966 }
3967 else
3968 {
3969 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3970 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3971 return VERR_IEM_IPE_9;
3972 }
3973
3974 /*
3975 * The 'EXT' bit is set when an exception occurs during delivery of an external
3976 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3977 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
3978 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3979 *
3980 * [1] - Intel spec. 6.13 "Error Code"
3981 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3982 * [3] - Intel Instruction reference for INT n.
3983 */
3984 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3985 && (fFlags & IEM_XCPT_FLAGS_ERR)
3986 && u8Vector != X86_XCPT_PF
3987 && u8Vector != X86_XCPT_DF)
3988 {
3989 uErr |= X86_TRAP_ERR_EXTERNAL;
3990 }
3991 }
3992
3993 pVCpu->iem.s.cXcptRecursions++;
3994 pVCpu->iem.s.uCurXcpt = u8Vector;
3995 pVCpu->iem.s.fCurXcpt = fFlags;
3996 pVCpu->iem.s.uCurXcptErr = uErr;
3997 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3998
3999 /*
4000 * Extensive logging.
4001 */
4002#if defined(LOG_ENABLED) && defined(IN_RING3)
4003 if (LogIs3Enabled())
4004 {
4005 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4006 PVM pVM = pVCpu->CTX_SUFF(pVM);
4007 char szRegs[4096];
4008 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4009 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4010 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4011 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4012 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4013 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4014 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4015 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4016 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4017 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4018 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4019 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4020 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4021 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4022 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4023 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4024 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4025 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4026 " efer=%016VR{efer}\n"
4027 " pat=%016VR{pat}\n"
4028 " sf_mask=%016VR{sf_mask}\n"
4029 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4030 " lstar=%016VR{lstar}\n"
4031 " star=%016VR{star} cstar=%016VR{cstar}\n"
4032 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4033 );
4034
4035 char szInstr[256];
4036 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4037 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4038 szInstr, sizeof(szInstr), NULL);
4039 Log3(("%s%s\n", szRegs, szInstr));
4040 }
4041#endif /* LOG_ENABLED */
4042
4043 /*
4044 * Stats.
4045 */
4046 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4047 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4048 else if (u8Vector <= X86_XCPT_LAST)
4049 {
4050 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4051 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4052 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4053 }
4054
4055 /*
4056 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4057 * to ensure that a stale TLB or paging cache entry will only cause one
4058 * spurious #PF.
4059 */
4060 if ( u8Vector == X86_XCPT_PF
4061 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4062 IEMTlbInvalidatePage(pVCpu, uCr2);
4063
4064 /*
4065 * Call the mode specific worker function.
4066 */
4067 VBOXSTRICTRC rcStrict;
4068 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4069 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4070 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4071 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4072 else
4073 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4074
4075 /* Flush the prefetch buffer. */
4076 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4077
4078 /*
4079 * Unwind.
4080 */
4081 pVCpu->iem.s.cXcptRecursions--;
4082 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4083 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4084 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4085 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4086 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4087 return rcStrict;
4088}
4089
4090#ifdef IEM_WITH_SETJMP
4091/**
4092 * See iemRaiseXcptOrInt. Will not return.
4093 */
4094DECL_NO_RETURN(void)
4095iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4096 uint8_t cbInstr,
4097 uint8_t u8Vector,
4098 uint32_t fFlags,
4099 uint16_t uErr,
4100 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4101{
4102 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4103 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4104}
4105#endif
4106
4107
4108/** \#DE - 00. */
4109VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4110{
4111 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4112}
4113
4114
4115/** \#DB - 01.
4116 * @note This automatically clears DR7.GD. */
4117VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4118{
4119 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4120 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4121 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4122}
4123
4124
4125/** \#BR - 05. */
4126VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4127{
4128 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4129}
4130
4131
4132/** \#UD - 06. */
4133VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4134{
4135 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4136}
4137
4138
4139/** \#NM - 07. */
4140VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4141{
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4143}
4144
4145
4146/** \#TS(err) - 0a. */
4147VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4148{
4149 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4150}
4151
4152
4153/** \#TS(tr) - 0a. */
4154VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4155{
4156 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 pVCpu->cpum.GstCtx.tr.Sel, 0);
4158}
4159
4160
4161/** \#TS(0) - 0a. */
4162VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4165 0, 0);
4166}
4167
4168
4169/** \#TS(err) - 0a. */
4170VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 uSel & X86_SEL_MASK_OFF_RPL, 0);
4174}
4175
4176
4177/** \#NP(err) - 0b. */
4178VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4181}
4182
4183
4184/** \#NP(sel) - 0b. */
4185VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4186{
4187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4188 uSel & ~X86_SEL_RPL, 0);
4189}
4190
4191
4192/** \#SS(seg) - 0c. */
4193VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4196 uSel & ~X86_SEL_RPL, 0);
4197}
4198
4199
4200/** \#SS(err) - 0c. */
4201VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4204}
4205
4206
4207/** \#GP(n) - 0d. */
4208VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4209{
4210 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4211}
4212
4213
4214/** \#GP(0) - 0d. */
4215VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4218}
4219
4220#ifdef IEM_WITH_SETJMP
4221/** \#GP(0) - 0d. */
4222DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4223{
4224 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4225}
4226#endif
4227
4228
4229/** \#GP(sel) - 0d. */
4230VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4233 Sel & ~X86_SEL_RPL, 0);
4234}
4235
4236
4237/** \#GP(0) - 0d. */
4238VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4239{
4240 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4241}
4242
4243
4244/** \#GP(sel) - 0d. */
4245VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4246{
4247 NOREF(iSegReg); NOREF(fAccess);
4248 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4249 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4250}
4251
4252#ifdef IEM_WITH_SETJMP
4253/** \#GP(sel) - 0d, longjmp. */
4254DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4255{
4256 NOREF(iSegReg); NOREF(fAccess);
4257 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4258 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4259}
4260#endif
4261
4262/** \#GP(sel) - 0d. */
4263VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4264{
4265 NOREF(Sel);
4266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4267}
4268
4269#ifdef IEM_WITH_SETJMP
4270/** \#GP(sel) - 0d, longjmp. */
4271DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4272{
4273 NOREF(Sel);
4274 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4275}
4276#endif
4277
4278
4279/** \#GP(sel) - 0d. */
4280VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4281{
4282 NOREF(iSegReg); NOREF(fAccess);
4283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4284}
4285
4286#ifdef IEM_WITH_SETJMP
4287/** \#GP(sel) - 0d, longjmp. */
4288DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4289{
4290 NOREF(iSegReg); NOREF(fAccess);
4291 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4292}
4293#endif
4294
4295
4296/** \#PF(n) - 0e. */
4297VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4298{
4299 uint16_t uErr;
4300 switch (rc)
4301 {
4302 case VERR_PAGE_NOT_PRESENT:
4303 case VERR_PAGE_TABLE_NOT_PRESENT:
4304 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4305 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4306 uErr = 0;
4307 break;
4308
4309 default:
4310 AssertMsgFailed(("%Rrc\n", rc));
4311 RT_FALL_THRU();
4312 case VERR_ACCESS_DENIED:
4313 uErr = X86_TRAP_PF_P;
4314 break;
4315
4316 /** @todo reserved */
4317 }
4318
4319 if (IEM_GET_CPL(pVCpu) == 3)
4320 uErr |= X86_TRAP_PF_US;
4321
4322 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4323 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4324 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4325 uErr |= X86_TRAP_PF_ID;
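 /* Note: Real hardware only reports the I/D bit for instruction fetches when
    no-execute paging is in effect, which the PAE + EFER.NXE check above
    approximates. */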
4326
4327#if 0 /* This is so much non-sense, really. Why was it done like that? */
4328 /* Note! RW access callers reporting a WRITE protection fault, will clear
4329 the READ flag before calling. So, read-modify-write accesses (RW)
4330 can safely be reported as READ faults. */
4331 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4332 uErr |= X86_TRAP_PF_RW;
4333#else
4334 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4335 {
4336 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4337 /// (regardless of outcome of the comparison in the latter case).
4338 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4339 uErr |= X86_TRAP_PF_RW;
4340 }
4341#endif
4342
4343 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4344 of the memory operand rather than at the start of it. (Not sure what
4345 happens if it crosses a page boundary.) The current heuristic for
4346 this is to report the #PF for the last byte if the access is more than
4347 64 bytes. This is probably not correct, but we can work that out later;
4348 the main objective now is to get FXSAVE to work like real hardware and
4349 make bs3-cpu-basic2 work. */
4350 if (cbAccess <= 64)
4351 { /* likely */ }
4352 else
4353 GCPtrWhere += cbAccess - 1;
4354
4355 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4356 uErr, GCPtrWhere);
4357}
4358
4359#ifdef IEM_WITH_SETJMP
4360/** \#PF(n) - 0e, longjmp. */
4361DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4362 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4363{
4364 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4365}
4366#endif
4367
4368
4369/** \#MF(0) - 10. */
4370VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4371{
4372 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4373 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4374
4375 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4376 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4377 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4378}
4379
4380
4381/** \#AC(0) - 11. */
4382VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4383{
4384 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4385}
4386
4387#ifdef IEM_WITH_SETJMP
4388/** \#AC(0) - 11, longjmp. */
4389DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4390{
4391 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4392}
4393#endif
4394
4395
4396/** \#XF(0)/\#XM(0) - 19. */
4397VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4398{
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4400}
4401
4402
4403/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4404IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4405{
4406 NOREF(cbInstr);
4407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4408}
4409
4410
4411/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4412IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4413{
4414 NOREF(cbInstr);
4415 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4416}
4417
4418
4419/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4420IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4421{
4422 NOREF(cbInstr);
4423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4424}
4425
4426
4427/** @} */
4428
4429/** @name Common opcode decoders.
4430 * @{
4431 */
4432//#include <iprt/mem.h>
4433
4434/**
4435 * Used to add extra details about a stub case.
4436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4437 */
4438void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4439{
4440#if defined(LOG_ENABLED) && defined(IN_RING3)
4441 PVM pVM = pVCpu->CTX_SUFF(pVM);
4442 char szRegs[4096];
4443 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4444 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4445 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4446 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4447 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4448 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4449 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4450 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4451 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4452 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4453 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4454 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4455 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4456 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4457 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4458 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4459 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4460 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4461 " efer=%016VR{efer}\n"
4462 " pat=%016VR{pat}\n"
4463 " sf_mask=%016VR{sf_mask}\n"
4464 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4465 " lstar=%016VR{lstar}\n"
4466 " star=%016VR{star} cstar=%016VR{cstar}\n"
4467 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4468 );
4469
4470 char szInstr[256];
4471 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4472 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4473 szInstr, sizeof(szInstr), NULL);
4474
4475 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4476#else
4477    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4478#endif
4479}
4480
4481/** @} */
4482
4483
4484
4485/** @name Register Access.
4486 * @{
4487 */
4488
4489/**
4490 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4491 *
4492 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4493 * segment limit.
4494 *
4495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4496 * @param cbInstr Instruction size.
4497 * @param offNextInstr The offset of the next instruction.
4498 * @param enmEffOpSize Effective operand size.
4499 */
4500VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4501 IEMMODE enmEffOpSize) RT_NOEXCEPT
4502{
4503 switch (enmEffOpSize)
4504 {
4505 case IEMMODE_16BIT:
4506 {
4507 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4508 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4509 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4510 pVCpu->cpum.GstCtx.rip = uNewIp;
4511 else
4512 return iemRaiseGeneralProtectionFault0(pVCpu);
4513 break;
4514 }
4515
4516 case IEMMODE_32BIT:
4517 {
4518 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4519 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4520
4521 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4522 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4523 pVCpu->cpum.GstCtx.rip = uNewEip;
4524 else
4525 return iemRaiseGeneralProtectionFault0(pVCpu);
4526 break;
4527 }
4528
4529 case IEMMODE_64BIT:
4530 {
4531 Assert(IEM_IS_64BIT_CODE(pVCpu));
4532
4533 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4534 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4535 pVCpu->cpum.GstCtx.rip = uNewRip;
4536 else
4537 return iemRaiseGeneralProtectionFault0(pVCpu);
4538 break;
4539 }
4540
4541 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4542 }
4543
4544#ifndef IEM_WITH_CODE_TLB
4545 /* Flush the prefetch buffer. */
4546 pVCpu->iem.s.cbOpcode = cbInstr;
4547#endif
4548
4549 /*
4550 * Clear RF and finish the instruction (maybe raise #DB).
4551 */
4552 return iemRegFinishClearingRF(pVCpu);
4553}
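
/*
 * A minimal standalone sketch of the two target checks applied above: 16-bit
 * targets wrap at 64 KiB and must stay within the CS limit, while 64-bit
 * targets must be canonical.  The helper names and constants below are
 * assumptions made for this illustration only and are not taken from IEM.
 */
#include <stdint.h>
#include <stdbool.h>

/* Canonical means bits 63:47 are a sign extension of bit 47 (48-bit implementations). */
static bool exampleIsCanonicalU64(uint64_t uAddr)
{
    return uAddr + UINT64_C(0x0000800000000000) < UINT64_C(0x0001000000000000);
}

/* 16-bit operand size: IP wraps naturally in the uint16_t addition. */
static bool exampleJumpIp16IsValid(uint16_t uIp, uint8_t cbInstr, int8_t offRel, uint32_t cbCsLimit)
{
    uint16_t const uNewIp = (uint16_t)(uIp + cbInstr + (int16_t)offRel);
    return uNewIp <= cbCsLimit;
}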
4554
4555
4556/**
4557 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4558 *
4559 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4560 * segment limit.
4561 *
4562 * @returns Strict VBox status code.
4563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4564 * @param cbInstr Instruction size.
4565 * @param offNextInstr The offset of the next instruction.
4566 */
4567VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4568{
4569 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4570
4571 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4572 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4573 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4574 pVCpu->cpum.GstCtx.rip = uNewIp;
4575 else
4576 return iemRaiseGeneralProtectionFault0(pVCpu);
4577
4578#ifndef IEM_WITH_CODE_TLB
4579 /* Flush the prefetch buffer. */
4580 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4581#endif
4582
4583 /*
4584 * Clear RF and finish the instruction (maybe raise #DB).
4585 */
4586 return iemRegFinishClearingRF(pVCpu);
4587}
4588
4589
4590/**
4591 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4592 *
4593 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4594 * segment limit.
4595 *
4596 * @returns Strict VBox status code.
4597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4598 * @param cbInstr Instruction size.
4599 * @param offNextInstr The offset of the next instruction.
4600 * @param enmEffOpSize Effective operand size.
4601 */
4602VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4603 IEMMODE enmEffOpSize) RT_NOEXCEPT
4604{
4605 if (enmEffOpSize == IEMMODE_32BIT)
4606 {
4607 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4608
4609 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4610 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4611 pVCpu->cpum.GstCtx.rip = uNewEip;
4612 else
4613 return iemRaiseGeneralProtectionFault0(pVCpu);
4614 }
4615 else
4616 {
4617 Assert(enmEffOpSize == IEMMODE_64BIT);
4618
4619 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4620 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4621 pVCpu->cpum.GstCtx.rip = uNewRip;
4622 else
4623 return iemRaiseGeneralProtectionFault0(pVCpu);
4624 }
4625
4626#ifndef IEM_WITH_CODE_TLB
4627 /* Flush the prefetch buffer. */
4628 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4629#endif
4630
4631 /*
4632 * Clear RF and finish the instruction (maybe raise #DB).
4633 */
4634 return iemRegFinishClearingRF(pVCpu);
4635}
4636
4637
4638/**
4639 * Performs a near jump to the specified address.
4640 *
4641 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param uNewIp The new IP value.
4645 */
4646VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4647{
4648 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4649 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4650 pVCpu->cpum.GstCtx.rip = uNewIp;
4651 else
4652 return iemRaiseGeneralProtectionFault0(pVCpu);
4653 /** @todo Test 16-bit jump in 64-bit mode. */
4654
4655#ifndef IEM_WITH_CODE_TLB
4656 /* Flush the prefetch buffer. */
4657 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4658#endif
4659
4660 /*
4661 * Clear RF and finish the instruction (maybe raise #DB).
4662 */
4663 return iemRegFinishClearingRF(pVCpu);
4664}
4665
4666
4667/**
4668 * Performs a near jump to the specified address.
4669 *
4670 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4671 *
4672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4673 * @param uNewEip The new EIP value.
4674 */
4675VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4676{
4677 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4678 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4679
4680 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4681 pVCpu->cpum.GstCtx.rip = uNewEip;
4682 else
4683 return iemRaiseGeneralProtectionFault0(pVCpu);
4684
4685#ifndef IEM_WITH_CODE_TLB
4686 /* Flush the prefetch buffer. */
4687 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4688#endif
4689
4690 /*
4691 * Clear RF and finish the instruction (maybe raise #DB).
4692 */
4693 return iemRegFinishClearingRF(pVCpu);
4694}
4695
4696
4697/**
4698 * Performs a near jump to the specified address.
4699 *
4700 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4701 * segment limit.
4702 *
4703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4704 * @param uNewRip The new RIP value.
4705 */
4706VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4707{
4708 Assert(IEM_IS_64BIT_CODE(pVCpu));
4709
4710 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4711 pVCpu->cpum.GstCtx.rip = uNewRip;
4712 else
4713 return iemRaiseGeneralProtectionFault0(pVCpu);
4714
4715#ifndef IEM_WITH_CODE_TLB
4716 /* Flush the prefetch buffer. */
4717 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4718#endif
4719
4720 /*
4721 * Clear RF and finish the instruction (maybe raise #DB).
4722 */
4723 return iemRegFinishClearingRF(pVCpu);
4724}
4725
4726/** @} */
4727
4728
4729/** @name FPU access and helpers.
4730 *
4731 * @{
4732 */
4733
4734/**
4735 * Updates the x87.DS and FPUDP registers.
4736 *
4737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4738 * @param pFpuCtx The FPU context.
4739 * @param iEffSeg The effective segment register.
4740 * @param GCPtrEff The effective address relative to @a iEffSeg.
4741 */
4742DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4743{
4744 RTSEL sel;
4745 switch (iEffSeg)
4746 {
4747 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4748 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4749 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4750 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4751 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4752 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4753 default:
4754 AssertMsgFailed(("%d\n", iEffSeg));
4755 sel = pVCpu->cpum.GstCtx.ds.Sel;
4756 }
4757    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4758 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4759 {
4760 pFpuCtx->DS = 0;
4761 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4762 }
4763 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4764 {
4765 pFpuCtx->DS = sel;
4766 pFpuCtx->FPUDP = GCPtrEff;
4767 }
4768 else
4769 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4770}
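
/*
 * A minimal standalone sketch of the real/V86-mode branch above: the FPU data
 * pointer is stored as a linear address, i.e. the selector shifted left by
 * four plus the effective offset.  The function name is hypothetical.
 */
#include <stdint.h>

static uint32_t exampleRealModeFpuDp(uint16_t uSel, uint16_t offEff)
{
    return ((uint32_t)uSel << 4) + offEff;  /* e.g. 1234:0010 -> 0x12350 */
}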
4771
4772
4773/**
4774 * Rotates the stack registers in the push direction.
4775 *
4776 * @param pFpuCtx The FPU context.
4777 * @remarks This is a complete waste of time, but fxsave stores the registers in
4778 * stack order.
4779 */
4780DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4781{
4782 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4783 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4784 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4785 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4786 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4787 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4788 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4790 pFpuCtx->aRegs[0].r80 = r80Tmp;
4791}
4792
4793
4794/**
4795 * Rotates the stack registers in the pop direction.
4796 *
4797 * @param pFpuCtx The FPU context.
4798 * @remarks This is a complete waste of time, but fxsave stores the registers in
4799 * stack order.
4800 */
4801DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4802{
4803 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4804 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4805 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4806 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4807 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4808 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4809 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4810 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4811 pFpuCtx->aRegs[7].r80 = r80Tmp;
4812}
4813
4814
4815/**
4816 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4817 * exception prevents it.
4818 *
4819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4820 * @param pResult The FPU operation result to push.
4821 * @param pFpuCtx The FPU context.
4822 */
4823static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4824{
4825 /* Update FSW and bail if there are pending exceptions afterwards. */
4826 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4827 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4828 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4829 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4830 {
4831        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4832 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4834 pFpuCtx->FSW = fFsw;
4835 return;
4836 }
4837
4838 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4839 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4840 {
4841 /* All is fine, push the actual value. */
4842 pFpuCtx->FTW |= RT_BIT(iNewTop);
4843 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4844 }
4845 else if (pFpuCtx->FCW & X86_FCW_IM)
4846 {
4847 /* Masked stack overflow, push QNaN. */
4848 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4849 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4850 }
4851 else
4852 {
4853 /* Raise stack overflow, don't push anything. */
4854 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4855 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4856 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4858 return;
4859 }
4860
4861 fFsw &= ~X86_FSW_TOP_MASK;
4862 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4863 pFpuCtx->FSW = fFsw;
4864
4865 iemFpuRotateStackPush(pFpuCtx);
4866 RT_NOREF(pVCpu);
4867}
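
/*
 * A minimal standalone sketch of the pending-exception test above, relying on
 * the x86 layout where the IE, DE and ZE status flags in FSW (bits 0, 1 and 2)
 * line up with the IM, DM and ZM mask bits in FCW.  The constant and function
 * name are assumptions for this illustration only.
 */
#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_X87_IE_DE_ZE  UINT16_C(0x0007)

static bool exampleHasUnmaskedX87Exception(uint16_t fFsw, uint16_t fFcw)
{
    /* A raised status flag only matters when its mask bit in FCW is clear. */
    return ((fFsw & EXAMPLE_X87_IE_DE_ZE) & ~(fFcw & EXAMPLE_X87_IE_DE_ZE)) != 0;
}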
4868
4869
4870/**
4871 * Stores a result in a FPU register and updates the FSW and FTW.
4872 *
4873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4874 * @param pFpuCtx The FPU context.
4875 * @param pResult The result to store.
4876 * @param iStReg Which FPU register to store it in.
4877 */
4878static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4879{
4880 Assert(iStReg < 8);
4881 uint16_t fNewFsw = pFpuCtx->FSW;
4882 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4883 fNewFsw &= ~X86_FSW_C_MASK;
4884 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4885 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4886 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4888 pFpuCtx->FSW = fNewFsw;
4889 pFpuCtx->FTW |= RT_BIT(iReg);
4890 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4891 RT_NOREF(pVCpu);
4892}
4893
4894
4895/**
4896 * Only updates the FPU status word (FSW) with the result of the current
4897 * instruction.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param pFpuCtx The FPU context.
4901 * @param u16FSW The FSW output of the current instruction.
4902 */
4903static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4904{
4905 uint16_t fNewFsw = pFpuCtx->FSW;
4906 fNewFsw &= ~X86_FSW_C_MASK;
4907 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4908 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4909        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4910 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4911 pFpuCtx->FSW = fNewFsw;
4912 RT_NOREF(pVCpu);
4913}
4914
4915
4916/**
4917 * Pops one item off the FPU stack if no pending exception prevents it.
4918 *
4919 * @param pFpuCtx The FPU context.
4920 */
4921static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4922{
4923 /* Check pending exceptions. */
4924 uint16_t uFSW = pFpuCtx->FSW;
4925 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4926 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4927 return;
4928
4929 /* TOP--. */
4930 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4931 uFSW &= ~X86_FSW_TOP_MASK;
4932 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4933 pFpuCtx->FSW = uFSW;
4934
4935 /* Mark the previous ST0 as empty. */
4936 iOldTop >>= X86_FSW_TOP_SHIFT;
4937 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4938
4939 /* Rotate the registers. */
4940 iemFpuRotateStackPop(pFpuCtx);
4941}
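
/*
 * A minimal standalone sketch of the TOP arithmetic used above: TOP is the
 * 3-bit field in FSW bits 13:11, so a push decrements it by adding 7 modulo 8
 * and a pop increments it by adding 1 (or 9) modulo 8.  Names and constants
 * are illustrative only.
 */
#include <stdint.h>

#define EXAMPLE_FSW_TOP_SHIFT  11
#define EXAMPLE_FSW_TOP_MASK   UINT16_C(0x3800)

static uint16_t exampleFswSetTop(uint16_t fFsw, uint16_t iTop)
{
    return (uint16_t)((fFsw & ~EXAMPLE_FSW_TOP_MASK) | ((iTop & 7) << EXAMPLE_FSW_TOP_SHIFT));
}

static uint16_t exampleFswTopPush(uint16_t fFsw)  /* TOP = (TOP - 1) mod 8 */
{
    return exampleFswSetTop(fFsw, (uint16_t)(((fFsw & EXAMPLE_FSW_TOP_MASK) >> EXAMPLE_FSW_TOP_SHIFT) + 7));
}

static uint16_t exampleFswTopPop(uint16_t fFsw)   /* TOP = (TOP + 1) mod 8 */
{
    return exampleFswSetTop(fFsw, (uint16_t)(((fFsw & EXAMPLE_FSW_TOP_MASK) >> EXAMPLE_FSW_TOP_SHIFT) + 1));
}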
4942
4943
4944/**
4945 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The FPU operation result to push.
4949 * @param uFpuOpcode The FPU opcode value.
4950 */
4951void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4955 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4956}
4957
4958
4959/**
4960 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4961 * and sets FPUDP and FPUDS.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to push.
4965 * @param iEffSeg The effective segment register.
4966 * @param GCPtrEff The effective address relative to @a iEffSeg.
4967 * @param uFpuOpcode The FPU opcode value.
4968 */
4969void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4970 uint16_t uFpuOpcode) RT_NOEXCEPT
4971{
4972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4973 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4974 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4975 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4976}
4977
4978
4979/**
4980 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4981 * unless a pending exception prevents it.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param pResult The FPU operation result to store and push.
4985 * @param uFpuOpcode The FPU opcode value.
4986 */
4987void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4988{
4989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4991
4992 /* Update FSW and bail if there are pending exceptions afterwards. */
4993 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4994 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4995 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4996 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4997 {
4998 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4999 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5001 pFpuCtx->FSW = fFsw;
5002 return;
5003 }
5004
5005 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5006 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5007 {
5008 /* All is fine, push the actual value. */
5009 pFpuCtx->FTW |= RT_BIT(iNewTop);
5010 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5011 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5012 }
5013 else if (pFpuCtx->FCW & X86_FCW_IM)
5014 {
5015 /* Masked stack overflow, push QNaN. */
5016 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5019 }
5020 else
5021 {
5022 /* Raise stack overflow, don't push anything. */
5023 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5024 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5025 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5027 return;
5028 }
5029
5030 fFsw &= ~X86_FSW_TOP_MASK;
5031 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5032 pFpuCtx->FSW = fFsw;
5033
5034 iemFpuRotateStackPush(pFpuCtx);
5035}
5036
5037
5038/**
5039 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5040 * FOP.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The result to store.
5044 * @param iStReg Which FPU register to store it in.
5045 * @param uFpuOpcode The FPU opcode value.
5046 */
5047void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5051 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5052}
5053
5054
5055/**
5056 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5057 * FOP, and then pops the stack.
5058 *
5059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5060 * @param pResult The result to store.
5061 * @param iStReg Which FPU register to store it in.
5062 * @param uFpuOpcode The FPU opcode value.
5063 */
5064void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5065{
5066 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5067 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5068 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5069 iemFpuMaybePopOne(pFpuCtx);
5070}
5071
5072
5073/**
5074 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5075 * FPUDP, and FPUDS.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param pResult The result to store.
5079 * @param iStReg Which FPU register to store it in.
5080 * @param iEffSeg The effective memory operand selector register.
5081 * @param GCPtrEff The effective memory operand offset.
5082 * @param uFpuOpcode The FPU opcode value.
5083 */
5084void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5085 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091}
5092
5093
5094/**
5095 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5096 * FPUDP, and FPUDS, and then pops the stack.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param pResult The result to store.
5100 * @param iStReg Which FPU register to store it in.
5101 * @param iEffSeg The effective memory operand selector register.
5102 * @param GCPtrEff The effective memory operand offset.
5103 * @param uFpuOpcode The FPU opcode value.
5104 */
5105void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5106 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5110 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5111 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5112 iemFpuMaybePopOne(pFpuCtx);
5113}
5114
5115
5116/**
5117 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5118 *
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126}
5127
5128
5129/**
5130 * Updates the FSW, FOP, FPUIP, and FPUCS.
5131 *
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param u16FSW The FSW from the current instruction.
5134 * @param uFpuOpcode The FPU opcode value.
5135 */
5136void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5137{
5138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5139 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5140 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5141}
5142
5143
5144/**
5145 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param u16FSW The FSW from the current instruction.
5149 * @param uFpuOpcode The FPU opcode value.
5150 */
5151void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5152{
5153 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5156 iemFpuMaybePopOne(pFpuCtx);
5157}
5158
5159
5160/**
5161 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param u16FSW The FSW from the current instruction.
5165 * @param iEffSeg The effective memory operand selector register.
5166 * @param GCPtrEff The effective memory operand offset.
5167 * @param uFpuOpcode The FPU opcode value.
5168 */
5169void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5170{
5171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5172 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5173 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5174 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5175}
5176
5177
5178/**
5179 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5180 *
5181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5182 * @param u16FSW The FSW from the current instruction.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190 iemFpuMaybePopOne(pFpuCtx);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195/**
5196 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5197 *
5198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5199 * @param u16FSW The FSW from the current instruction.
5200 * @param iEffSeg The effective memory operand selector register.
5201 * @param GCPtrEff The effective memory operand offset.
5202 * @param uFpuOpcode The FPU opcode value.
5203 */
5204void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5205 uint16_t uFpuOpcode) RT_NOEXCEPT
5206{
5207 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5208 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5209 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5210 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5211 iemFpuMaybePopOne(pFpuCtx);
5212}
5213
5214
5215/**
5216 * Worker routine for raising an FPU stack underflow exception.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 * @param pFpuCtx The FPU context.
5220 * @param iStReg The stack register being accessed.
5221 */
5222static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5223{
5224 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5225 if (pFpuCtx->FCW & X86_FCW_IM)
5226 {
5227 /* Masked underflow. */
5228 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5229 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5231 if (iStReg != UINT8_MAX)
5232 {
5233 pFpuCtx->FTW |= RT_BIT(iReg);
5234 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5235 }
5236 }
5237 else
5238 {
5239 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5240 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5241 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5242 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5243 }
5244 RT_NOREF(pVCpu);
5245}
5246
5247
5248/**
5249 * Raises a FPU stack underflow exception.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param iStReg The destination register that should be loaded
5253 * with QNaN if \#IS is not masked. Specify
5254 * UINT8_MAX if none (like for fcom).
5255 * @param uFpuOpcode The FPU opcode value.
5256 */
5257void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5258{
5259 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5260 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5261 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5262}
5263
5264
5265void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5266{
5267 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5268 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5269 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5270 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5271}
5272
5273
5274void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5278 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5284 uint16_t uFpuOpcode) RT_NOEXCEPT
5285{
5286 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5287 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5288 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5289 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5290 iemFpuMaybePopOne(pFpuCtx);
5291}
5292
5293
5294void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5295{
5296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5297 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5298 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5299 iemFpuMaybePopOne(pFpuCtx);
5300 iemFpuMaybePopOne(pFpuCtx);
5301}
5302
5303
5304void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5305{
5306 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5307 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5308
5309 if (pFpuCtx->FCW & X86_FCW_IM)
5310 {
5311        /* Masked underflow - Push QNaN. */
5312 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5313 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5315 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5316 pFpuCtx->FTW |= RT_BIT(iNewTop);
5317 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5318 iemFpuRotateStackPush(pFpuCtx);
5319 }
5320 else
5321 {
5322 /* Exception pending - don't change TOP or the register stack. */
5323 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5324 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5325 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5327 }
5328}
5329
5330
5331void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5332{
5333 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335
5336 if (pFpuCtx->FCW & X86_FCW_IM)
5337 {
5338        /* Masked underflow - Push QNaN. */
5339 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5340 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5341 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5342 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5343 pFpuCtx->FTW |= RT_BIT(iNewTop);
5344 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5345 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5346 iemFpuRotateStackPush(pFpuCtx);
5347 }
5348 else
5349 {
5350 /* Exception pending - don't change TOP or the register stack. */
5351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5352 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5353 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5354 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5355 }
5356}
5357
5358
5359/**
5360 * Worker routine for raising an FPU stack overflow exception on a push.
5361 *
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param pFpuCtx The FPU context.
5364 */
5365static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5366{
5367 if (pFpuCtx->FCW & X86_FCW_IM)
5368 {
5369 /* Masked overflow. */
5370 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5371 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5372 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5373 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5374 pFpuCtx->FTW |= RT_BIT(iNewTop);
5375 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5376 iemFpuRotateStackPush(pFpuCtx);
5377 }
5378 else
5379 {
5380 /* Exception pending - don't change TOP or the register stack. */
5381 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5382 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5383 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5385 }
5386 RT_NOREF(pVCpu);
5387}
5388
5389
5390/**
5391 * Raises a FPU stack overflow exception on a push.
5392 *
5393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5394 * @param uFpuOpcode The FPU opcode value.
5395 */
5396void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5397{
5398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5399 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5400 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5401}
5402
5403
5404/**
5405 * Raises a FPU stack overflow exception on a push with a memory operand.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param iEffSeg The effective memory operand selector register.
5409 * @param GCPtrEff The effective memory operand offset.
5410 * @param uFpuOpcode The FPU opcode value.
5411 */
5412void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5413{
5414 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5415 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5416 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5417 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5418}
5419
5420/** @} */
5421
5422
5423/** @name SSE+AVX SIMD access and helpers.
5424 *
5425 * @{
5426 */
5427/**
5428 * Stores a result in a SIMD XMM register, updates the MXCSR.
5429 *
5430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5431 * @param pResult The result to store.
5432 * @param iXmmReg Which SIMD XMM register to store the result in.
5433 */
5434void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5438
5439 /* The result is only updated if there is no unmasked exception pending. */
5440 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5441 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5442 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5443}
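
/*
 * A minimal standalone sketch of the MXCSR test above, assuming the standard
 * layout with the six exception flags in bits 5:0 and the corresponding mask
 * bits in bits 12:7.  Names and constants are illustrative only.
 */
#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_MXCSR_XCPT_FLAGS       UINT32_C(0x003f)
#define EXAMPLE_MXCSR_XCPT_MASK        UINT32_C(0x1f80)
#define EXAMPLE_MXCSR_XCPT_MASK_SHIFT  7

static bool exampleSseResultShouldBeStored(uint32_t fMxcsr)
{
    /* The result is committed only when every raised exception flag is masked. */
    uint32_t const fUnmasked = ~((fMxcsr & EXAMPLE_MXCSR_XCPT_MASK) >> EXAMPLE_MXCSR_XCPT_MASK_SHIFT)
                             & (fMxcsr & EXAMPLE_MXCSR_XCPT_FLAGS);
    return fUnmasked == 0;
}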
5444
5445
5446/**
5447 * Updates the MXCSR.
5448 *
5449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5450 * @param fMxcsr The new MXCSR value.
5451 */
5452void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5456}
5457/** @} */
5458
5459
5460/** @name Memory access.
5461 *
5462 * @{
5463 */
5464
5465
5466/**
5467 * Updates the IEMCPU::cbWritten counter if applicable.
5468 *
5469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5470 * @param fAccess The access being accounted for.
5471 * @param cbMem The access size.
5472 */
5473DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5474{
5475 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5476 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5477 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5478}
5479
5480
5481/**
5482 * Applies the segment limit, base and attributes.
5483 *
5484 * This may raise a \#GP or \#SS.
5485 *
5486 * @returns VBox strict status code.
5487 *
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param fAccess The kind of access which is being performed.
5490 * @param iSegReg The index of the segment register to apply.
5491 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5492 * TSS, ++).
5493 * @param cbMem The access size.
5494 * @param pGCPtrMem Pointer to the guest memory address to apply
5495 * segmentation to. Input and output parameter.
5496 */
5497VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5498{
5499 if (iSegReg == UINT8_MAX)
5500 return VINF_SUCCESS;
5501
5502 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5503 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5504 switch (IEM_GET_CPU_MODE(pVCpu))
5505 {
5506 case IEMMODE_16BIT:
5507 case IEMMODE_32BIT:
5508 {
5509 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5510 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5511
5512 if ( pSel->Attr.n.u1Present
5513 && !pSel->Attr.n.u1Unusable)
5514 {
5515 Assert(pSel->Attr.n.u1DescType);
5516 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5517 {
5518 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5519 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5520 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5521
5522 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5523 {
5524 /** @todo CPL check. */
5525 }
5526
5527 /*
5528 * There are two kinds of data selectors, normal and expand down.
5529 */
5530 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5531 {
5532 if ( GCPtrFirst32 > pSel->u32Limit
5533 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5534 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5535 }
5536 else
5537 {
5538 /*
5539 * The upper boundary is defined by the B bit, not the G bit!
5540 */
5541 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5542 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5543 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5544 }
5545 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5546 }
5547 else
5548 {
5549 /*
5550                 * Code selectors can usually be used to read through; writing is
5551                 * only permitted in real and V8086 mode.
5552 */
5553 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5554 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5555 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5556 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5557 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5558
5559 if ( GCPtrFirst32 > pSel->u32Limit
5560 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5561 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5562
5563 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5564 {
5565 /** @todo CPL check. */
5566 }
5567
5568 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5569 }
5570 }
5571 else
5572 return iemRaiseGeneralProtectionFault0(pVCpu);
5573 return VINF_SUCCESS;
5574 }
5575
5576 case IEMMODE_64BIT:
5577 {
5578 RTGCPTR GCPtrMem = *pGCPtrMem;
5579 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5580 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5581
5582 Assert(cbMem >= 1);
5583 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5584 return VINF_SUCCESS;
5585 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5586 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5587 return iemRaiseGeneralProtectionFault0(pVCpu);
5588 }
5589
5590 default:
5591 AssertFailedReturn(VERR_IEM_IPE_7);
5592 }
5593}
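
/*
 * A minimal standalone sketch of the two 32-bit data-segment limit checks
 * above: a normal segment requires the whole access to end at or below the
 * limit, while an expand-down segment requires it to start strictly above the
 * limit and end at or below the 64 KiB / 4 GiB bound selected by the B bit.
 * The function name is hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static bool exampleDataSegAccessOk(uint32_t offFirst, uint32_t cbAccess, uint32_t cbLimit,
                                   bool fExpandDown, bool fDefBig)
{
    uint32_t const offLast = offFirst + cbAccess - 1;
    if (!fExpandDown)
        return offFirst <= cbLimit && offLast <= cbLimit;
    return offFirst > cbLimit && offLast <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff));
}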
5594
5595
5596/**
5597 * Translates a virtual address to a physical address and checks if we
5598 * can access the page as specified.
5599 *
5600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5601 * @param GCPtrMem The virtual address.
5602 * @param cbAccess The access size, for raising \#PF correctly for
5603 * FXSAVE and such.
5604 * @param fAccess The intended access.
5605 * @param pGCPhysMem Where to return the physical address.
5606 */
5607VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5608 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5609{
5610 /** @todo Need a different PGM interface here. We're currently using
5611 *        generic / REM interfaces.  This won't cut it for R0. */
5612 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5613 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5614 * here. */
5615 PGMPTWALK Walk;
5616 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5617 if (RT_FAILURE(rc))
5618 {
5619 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5620 /** @todo Check unassigned memory in unpaged mode. */
5621 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5623 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5624 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5625#endif
5626 *pGCPhysMem = NIL_RTGCPHYS;
5627 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5628 }
5629
5630 /* If the page is writable and does not have the no-exec bit set, all
5631 access is allowed. Otherwise we'll have to check more carefully... */
5632 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5633 {
5634 /* Write to read only memory? */
5635 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5636 && !(Walk.fEffective & X86_PTE_RW)
5637 && ( ( IEM_GET_CPL(pVCpu) == 3
5638 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5639 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5640 {
5641 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5642 *pGCPhysMem = NIL_RTGCPHYS;
5643#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5644 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5645 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5646#endif
5647 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5648 }
5649
5650 /* Kernel memory accessed by userland? */
5651 if ( !(Walk.fEffective & X86_PTE_US)
5652 && IEM_GET_CPL(pVCpu) == 3
5653 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5654 {
5655 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5656 *pGCPhysMem = NIL_RTGCPHYS;
5657#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5658 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5659 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5660#endif
5661 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5662 }
5663
5664 /* Executing non-executable memory? */
5665 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5666 && (Walk.fEffective & X86_PTE_PAE_NX)
5667 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5668 {
5669 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5670 *pGCPhysMem = NIL_RTGCPHYS;
5671#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5674#endif
5675 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5676 VERR_ACCESS_DENIED);
5677 }
5678 }
5679
5680 /*
5681 * Set the dirty / access flags.
5682     * ASSUMES this is set when the address is translated rather than on commit...
5683 */
5684 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5685 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5686 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5687 {
5688 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5689 AssertRC(rc2);
5690 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5691 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5692 }
5693
5694 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5695 *pGCPhysMem = GCPhys;
5696 return VINF_SUCCESS;
5697}
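
/*
 * A minimal standalone sketch of the permission checks above, ignoring the
 * exemption the real code grants to system accesses (descriptor tables and
 * the like): writes to read-only pages fault at CPL 3 or when CR0.WP is set,
 * user-mode access to supervisor pages faults, and instruction fetches from
 * no-execute pages fault when EFER.NXE is enabled.  All names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

typedef struct EXAMPLEPTEFLAGS
{
    bool fWritable;  /* effective R/W bit */
    bool fUser;      /* effective U/S bit */
    bool fNoExec;    /* effective NX bit  */
} EXAMPLEPTEFLAGS;

static bool examplePageAccessAllowed(EXAMPLEPTEFLAGS const *pPte, bool fWrite, bool fExec,
                                     uint8_t uCpl, bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !pPte->fWritable && (uCpl == 3 || fCr0Wp))
        return false;                           /* write to a read-only page */
    if (uCpl == 3 && !pPte->fUser)
        return false;                           /* user access to a supervisor page */
    if (fExec && pPte->fNoExec && fEferNxe)
        return false;                           /* fetch from a no-execute page */
    return true;
}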
5698
5699
5700/**
5701 * Looks up a memory mapping entry.
5702 *
5703 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5705 * @param pvMem The memory address.
5706 * @param   fAccess             The access to match.
5707 */
5708DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5709{
5710 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5711 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5712 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5713 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5714 return 0;
5715 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5716 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5717 return 1;
5718 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5719 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5720 return 2;
5721 return VERR_NOT_FOUND;
5722}
5723
5724
5725/**
5726 * Finds a free memmap entry when using iNextMapping doesn't work.
5727 *
5728 * @returns Memory mapping index, 1024 on failure.
5729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5730 */
5731static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5732{
5733 /*
5734 * The easy case.
5735 */
5736 if (pVCpu->iem.s.cActiveMappings == 0)
5737 {
5738 pVCpu->iem.s.iNextMapping = 1;
5739 return 0;
5740 }
5741
5742 /* There should be enough mappings for all instructions. */
5743 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5744
5745 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5746 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5747 return i;
5748
5749 AssertFailedReturn(1024);
5750}
5751
5752
5753/**
5754 * Commits a bounce buffer that needs writing back and unmaps it.
5755 *
5756 * @returns Strict VBox status code.
5757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5758 * @param iMemMap The index of the buffer to commit.
5759 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5760 * Always false in ring-3, obviously.
5761 */
5762static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5763{
5764 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5765 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5766#ifdef IN_RING3
5767 Assert(!fPostponeFail);
5768 RT_NOREF_PV(fPostponeFail);
5769#endif
5770
5771 /*
5772 * Do the writing.
5773 */
5774 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5775 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5776 {
5777 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5778 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5779 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5780 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5781 {
5782 /*
5783 * Carefully and efficiently dealing with access handler return
5784             * codes makes this a little bloated.
5785 */
5786 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5788 pbBuf,
5789 cbFirst,
5790 PGMACCESSORIGIN_IEM);
5791 if (rcStrict == VINF_SUCCESS)
5792 {
5793 if (cbSecond)
5794 {
5795 rcStrict = PGMPhysWrite(pVM,
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5797 pbBuf + cbFirst,
5798 cbSecond,
5799 PGMACCESSORIGIN_IEM);
5800 if (rcStrict == VINF_SUCCESS)
5801 { /* nothing */ }
5802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5803 {
5804 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5807 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5808 }
5809#ifndef IN_RING3
5810 else if (fPostponeFail)
5811 {
5812 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5815 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5816 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5817 return iemSetPassUpStatus(pVCpu, rcStrict);
5818 }
5819#endif
5820 else
5821 {
5822 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5825 return rcStrict;
5826 }
5827 }
5828 }
5829 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5830 {
5831 if (!cbSecond)
5832 {
5833 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5835 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5836 }
5837 else
5838 {
5839 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5841 pbBuf + cbFirst,
5842 cbSecond,
5843 PGMACCESSORIGIN_IEM);
5844 if (rcStrict2 == VINF_SUCCESS)
5845 {
5846 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5850 }
5851 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5852 {
5853 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5856 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5857 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5858 }
5859#ifndef IN_RING3
5860 else if (fPostponeFail)
5861 {
5862 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5866 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5867 return iemSetPassUpStatus(pVCpu, rcStrict);
5868 }
5869#endif
5870 else
5871 {
5872 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5874 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5875 return rcStrict2;
5876 }
5877 }
5878 }
5879#ifndef IN_RING3
5880 else if (fPostponeFail)
5881 {
5882 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5885 if (!cbSecond)
5886 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5887 else
5888 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5889 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5890 return iemSetPassUpStatus(pVCpu, rcStrict);
5891 }
5892#endif
5893 else
5894 {
5895 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5897 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5898 return rcStrict;
5899 }
5900 }
5901 else
5902 {
5903 /*
5904 * No access handlers, much simpler.
5905 */
5906 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5907 if (RT_SUCCESS(rc))
5908 {
5909 if (cbSecond)
5910 {
5911 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5912 if (RT_SUCCESS(rc))
5913 { /* likely */ }
5914 else
5915 {
5916 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5917 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5918 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5919 return rc;
5920 }
5921 }
5922 }
5923 else
5924 {
5925 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5927 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5928 return rc;
5929 }
5930 }
5931 }
5932
5933#if defined(IEM_LOG_MEMORY_WRITES)
5934 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5935 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5936 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5937 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5938 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5939 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5940
5941 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5942 g_cbIemWrote = cbWrote;
5943 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5944#endif
5945
5946 /*
5947 * Free the mapping entry.
5948 */
5949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5950 Assert(pVCpu->iem.s.cActiveMappings != 0);
5951 pVCpu->iem.s.cActiveMappings--;
5952 return VINF_SUCCESS;
5953}
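
/*
 * A minimal standalone sketch of the split behind the first/second chunks
 * committed above: an access crossing a page boundary is divided at that
 * boundary and each part goes to its own physical range.  The 4 KiB page size
 * and the names are assumptions for this illustration only.
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE         UINT32_C(0x1000)
#define EXAMPLE_PAGE_OFFSET_MASK  (EXAMPLE_PAGE_SIZE - 1)

static void exampleSplitAtPageBoundary(uint64_t GCPtrFirst, size_t cbMem,
                                       uint32_t *pcbFirst, uint32_t *pcbSecond)
{
    uint32_t const cbFirstPage = EXAMPLE_PAGE_SIZE - (uint32_t)(GCPtrFirst & EXAMPLE_PAGE_OFFSET_MASK);
    if (cbFirstPage >= cbMem)
    {
        *pcbFirst  = (uint32_t)cbMem;  /* the access fits entirely in one page */
        *pcbSecond = 0;
    }
    else
    {
        *pcbFirst  = cbFirstPage;
        *pcbSecond = (uint32_t)cbMem - cbFirstPage;
    }
}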
5954
5955
5956/**
5957 * iemMemMap worker that deals with a request crossing pages.
5958 */
5959static VBOXSTRICTRC
5960iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5961{
5962 Assert(cbMem <= GUEST_PAGE_SIZE);
5963
5964 /*
5965 * Do the address translations.
5966 */
5967 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5968 RTGCPHYS GCPhysFirst;
5969 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5970 if (rcStrict != VINF_SUCCESS)
5971 return rcStrict;
5972 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5973
5974 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5975 RTGCPHYS GCPhysSecond;
5976 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5977 cbSecondPage, fAccess, &GCPhysSecond);
5978 if (rcStrict != VINF_SUCCESS)
5979 return rcStrict;
5980 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5981 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5982
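    /* Example (illustrative, assuming 4 KiB guest pages): a 4 byte access at a
       page offset of 0xffe yields cbFirstPage = 0x1000 - 0xffe = 2 and
       cbSecondPage = 4 - 2 = 2, i.e. the tail of the first page and the start of
       the second page are accessed separately and glued together in the bounce
       buffer below. */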
5983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5984
5985 /*
5986 * Read in the current memory content if it's a read, execute or partial
5987 * write access.
5988 */
5989 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5990
5991 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5992 {
5993 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5994 {
5995 /*
5996 * Must carefully deal with access handler status codes here,
5997 * makes the code a bit bloated.
5998 */
5999 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6000 if (rcStrict == VINF_SUCCESS)
6001 {
6002 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6003 if (rcStrict == VINF_SUCCESS)
6004 { /*likely */ }
6005 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6006 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6007 else
6008 {
6009                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6010 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6011 return rcStrict;
6012 }
6013 }
6014 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6015 {
6016 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6017 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6018 {
6019 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6020 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6021 }
6022 else
6023 {
6024                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6025                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6026 return rcStrict2;
6027 }
6028 }
6029 else
6030 {
6031                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6032 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6033 return rcStrict;
6034 }
6035 }
6036 else
6037 {
6038 /*
6039             * No informational status codes here, much more straightforward.
6040 */
6041 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6042 if (RT_SUCCESS(rc))
6043 {
6044 Assert(rc == VINF_SUCCESS);
6045 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6046 if (RT_SUCCESS(rc))
6047 Assert(rc == VINF_SUCCESS);
6048 else
6049 {
6050                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6051 return rc;
6052 }
6053 }
6054 else
6055 {
6056                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6057 return rc;
6058 }
6059 }
6060 }
6061#ifdef VBOX_STRICT
6062 else
6063 memset(pbBuf, 0xcc, cbMem);
6064 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6065 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6066#endif
6067 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6068
6069 /*
6070 * Commit the bounce buffer entry.
6071 */
6072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6073 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6074 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6075 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6076 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6077 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6078 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6079 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6080 pVCpu->iem.s.cActiveMappings++;
6081
6082 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6083 *ppvMem = pbBuf;
6084 return VINF_SUCCESS;
6085}
6086
6087
6088/**
6089 * iemMemMap worker that deals with iemMemPageMap failures.
6090 */
6091static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6092 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6093{
6094 /*
6095 * Filter out conditions we can handle and the ones which shouldn't happen.
6096 */
6097 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6098 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6099 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6100 {
6101 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6102 return rcMap;
6103 }
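    /* Note (illustrative): the three statuses accepted above are the cases where
       the page cannot simply be handed out for direct access - a page that only
       traps writes (VERR_PGM_PHYS_TLB_CATCH_WRITE), a page where every access is
       caught by a handler, e.g. MMIO (VERR_PGM_PHYS_TLB_CATCH_ALL), and a page
       with nothing backing it (VERR_PGM_PHYS_TLB_UNASSIGNED), which is made to
       read as all 0xff below. */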
6104 pVCpu->iem.s.cPotentialExits++;
6105
6106 /*
6107 * Read in the current memory content if it's a read, execute or partial
6108 * write access.
6109 */
6110 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6111 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6112 {
6113 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6114 memset(pbBuf, 0xff, cbMem);
6115 else
6116 {
6117 int rc;
6118 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6119 {
6120 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6121 if (rcStrict == VINF_SUCCESS)
6122 { /* nothing */ }
6123 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6124 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6125 else
6126 {
6127 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6128 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6129 return rcStrict;
6130 }
6131 }
6132 else
6133 {
6134 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6135 if (RT_SUCCESS(rc))
6136 { /* likely */ }
6137 else
6138 {
6139                    Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6140 GCPhysFirst, rc));
6141 return rc;
6142 }
6143 }
6144 }
6145 }
6146#ifdef VBOX_STRICT
6147 else
6148 memset(pbBuf, 0xcc, cbMem);
6149#endif
6150#ifdef VBOX_STRICT
6151 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6152 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6153#endif
6154
6155 /*
6156 * Commit the bounce buffer entry.
6157 */
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6163 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6164 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6165 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6166 pVCpu->iem.s.cActiveMappings++;
6167
6168 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6169 *ppvMem = pbBuf;
6170 return VINF_SUCCESS;
6171}
6172
6173
6174
6175/**
6176 * Maps the specified guest memory for the given kind of access.
6177 *
6178 * This may be using bounce buffering of the memory if it's crossing a page
6179 * boundary or if there is an access handler installed for any of it. Because
6180 * of lock prefix guarantees, we're in for some extra clutter when this
6181 * happens.
6182 *
6183 * This may raise a \#GP, \#SS, \#PF or \#AC.
6184 *
6185 * @returns VBox strict status code.
6186 *
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param ppvMem Where to return the pointer to the mapped memory.
6189 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6190 * 8, 12, 16, 32 or 512. When used by string operations
6191 * it can be up to a page.
6192 * @param iSegReg The index of the segment register to use for this
6193 * access. The base and limits are checked. Use UINT8_MAX
6194 * to indicate that no segmentation is required (for IDT,
6195 * GDT and LDT accesses).
6196 * @param GCPtrMem The address of the guest memory.
6197 * @param fAccess How the memory is being accessed. The
6198 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6199 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6200 * when raising exceptions.
6201 * @param uAlignCtl Alignment control:
6202 * - Bits 15:0 is the alignment mask.
6203 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6204 * IEM_MEMMAP_F_ALIGN_SSE, and
6205 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6206 * Pass zero to skip alignment.
6207 */
6208VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6209 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6210{
6211 /*
6212 * Check the input and figure out which mapping entry to use.
6213 */
6214 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6215 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6216 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6217 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6218 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6219
6220 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6221 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6222 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6223 {
6224 iMemMap = iemMemMapFindFree(pVCpu);
6225 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6226 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6227 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6228 pVCpu->iem.s.aMemMappings[2].fAccess),
6229 VERR_IEM_IPE_9);
6230 }
6231
6232 /*
6233 * Map the memory, checking that we can actually access it. If something
6234 * slightly complicated happens, fall back on bounce buffering.
6235 */
6236 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6237 if (rcStrict == VINF_SUCCESS)
6238 { /* likely */ }
6239 else
6240 return rcStrict;
6241
6242 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6243 { /* likely */ }
6244 else
6245 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6246
6247 /*
6248 * Alignment check.
6249 */
6250 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6251 { /* likelyish */ }
6252 else
6253 {
6254 /* Misaligned access. */
6255 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6256 {
6257 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6258 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6259 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6260 {
6261 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6262
6263 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6264 return iemRaiseAlignmentCheckException(pVCpu);
6265 }
6266 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6267 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6268 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6269 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6270 * that's what FXSAVE does on a 10980xe. */
6271 && iemMemAreAlignmentChecksEnabled(pVCpu))
6272 return iemRaiseAlignmentCheckException(pVCpu);
6273 else
6274 return iemRaiseGeneralProtectionFault0(pVCpu);
6275 }
6276 }
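    /* Illustration of the uAlignCtl encoding, using values from callers further
       down in this file: a naturally aligned integer read passes just the mask, e.g.
           iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
                     IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
       while the SSE-style 16 byte requirement combines mask and flags, e.g.
           ... IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
       With the latter a misaligned access raises #GP unless MXCSR.MM is set, in
       which case it falls under the #AC rules checked above. */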
6277
6278#ifdef IEM_WITH_DATA_TLB
6279 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6280
6281 /*
6282 * Get the TLB entry for this page.
6283 */
6284 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6285 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
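    /* Note (illustrative): the lookup is a single-probe affair - IEMTLB_CALC_TAG
       folds the virtual page (and TLB revision) into a tag, IEMTLB_TAG_TO_ENTRY
       picks the one candidate entry for it, and the compare below is all that
       separates a hit from a miss. */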
6286 if (pTlbe->uTag == uTag)
6287 {
6288# ifdef VBOX_WITH_STATISTICS
6289 pVCpu->iem.s.DataTlb.cTlbHits++;
6290# endif
6291 }
6292 else
6293 {
6294 pVCpu->iem.s.DataTlb.cTlbMisses++;
6295 PGMPTWALK Walk;
6296 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6297 if (RT_FAILURE(rc))
6298 {
6299 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6300# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6301 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6302 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6303# endif
6304 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6305 }
6306
6307 Assert(Walk.fSucceeded);
6308 pTlbe->uTag = uTag;
6309 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6310 pTlbe->GCPhys = Walk.GCPhys;
6311 pTlbe->pbMappingR3 = NULL;
6312 }
6313
6314 /*
6315 * Check TLB page table level access flags.
6316 */
6317 /* If the page is either supervisor only or non-writable, we need to do
6318 more careful access checks. */
6319 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6320 {
6321 /* Write to read only memory? */
6322 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6323 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6324 && ( ( IEM_GET_CPL(pVCpu) == 3
6325 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6326 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6327 {
6328 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6329# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6330 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6331 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6332# endif
6333 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6334 }
6335
6336 /* Kernel memory accessed by userland? */
6337 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6338 && IEM_GET_CPL(pVCpu) == 3
6339 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6340 {
6341 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6342# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6343 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6344 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6345# endif
6346 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6347 }
6348 }
6349
6350 /*
6351 * Set the dirty / access flags.
6352 * ASSUMES this is set when the address is translated rather than on commit...
6353 */
6354 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6355 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6356 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6357 {
6358 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6359 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6360 AssertRC(rc2);
6361 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6362 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6363 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6364 }
6365
6366 /*
6367 * Look up the physical page info if necessary.
6368 */
6369 uint8_t *pbMem = NULL;
6370 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6371# ifdef IN_RING3
6372 pbMem = pTlbe->pbMappingR3;
6373# else
6374 pbMem = NULL;
6375# endif
6376 else
6377 {
6378 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6379 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6380 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6381 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6382 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6383 { /* likely */ }
6384 else
6385 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
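        /* Note (illustrative): the guard above presumably forces a full physical
           TLB invalidation before the revision counter can wrap, so that a stale
           IEMTLBE_F_PHYS_REV value can never alias a freshly bumped revision. */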
6386 pTlbe->pbMappingR3 = NULL;
6387 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6388 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6389 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6390 &pbMem, &pTlbe->fFlagsAndPhysRev);
6391 AssertRCReturn(rc, rc);
6392# ifdef IN_RING3
6393 pTlbe->pbMappingR3 = pbMem;
6394# endif
6395 }
6396
6397 /*
6398 * Check the physical page level access and mapping.
6399 */
6400 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6401 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6402 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6403 { /* probably likely */ }
6404 else
6405 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6406 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6407 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6408 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6409 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6410 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6411
6412 if (pbMem)
6413 {
6414 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6415 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6416 fAccess |= IEM_ACCESS_NOT_LOCKED;
6417 }
6418 else
6419 {
6420 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6421 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6422 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6423 if (rcStrict != VINF_SUCCESS)
6424 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6425 }
6426
6427 void * const pvMem = pbMem;
6428
6429 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6430 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6431 if (fAccess & IEM_ACCESS_TYPE_READ)
6432 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6433
6434#else /* !IEM_WITH_DATA_TLB */
6435
6436 RTGCPHYS GCPhysFirst;
6437 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6438 if (rcStrict != VINF_SUCCESS)
6439 return rcStrict;
6440
6441 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6442 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6443 if (fAccess & IEM_ACCESS_TYPE_READ)
6444 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6445
6446 void *pvMem;
6447 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6448 if (rcStrict != VINF_SUCCESS)
6449 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6450
6451#endif /* !IEM_WITH_DATA_TLB */
6452
6453 /*
6454 * Fill in the mapping table entry.
6455 */
6456 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6457 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6458 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6459 pVCpu->iem.s.cActiveMappings += 1;
6460
6461 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6462 *ppvMem = pvMem;
6463
6464 return VINF_SUCCESS;
6465}
6466
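/*
 * Illustrative sketch, disabled from the build: how a caller typically pairs
 * iemMemMap with iemMemCommitAndUnmap, here for a 16-bit store.  The helper name
 * is made up for the example, and IEM_ACCESS_DATA_W is assumed to be the write
 * counterpart of the IEM_ACCESS_DATA_R access type used by the fetch helpers
 * further down in this file.
 */
#if 0
static VBOXSTRICTRC iemMemStoreDataU16Sketch(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    uint16_t *pu16Dst;
    /* Map two writable bytes, requiring natural (2 byte) alignment. */
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;    /* Write into the mapping (or bounce buffer)... */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W); /* ...then commit and release it. */
    }
    return rcStrict;
}
#endif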
6467
6468/**
6469 * Commits the guest memory if bounce buffered and unmaps it.
6470 *
6471 * @returns Strict VBox status code.
6472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6473 * @param pvMem The mapping.
6474 * @param fAccess The kind of access.
6475 */
6476VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6477{
6478 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6479 AssertReturn(iMemMap >= 0, iMemMap);
6480
6481 /* If it's bounce buffered, we may need to write back the buffer. */
6482 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6483 {
6484 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6485 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6486 }
6487 /* Otherwise unlock it. */
6488 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6489 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6490
6491 /* Free the entry. */
6492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6493 Assert(pVCpu->iem.s.cActiveMappings != 0);
6494 pVCpu->iem.s.cActiveMappings--;
6495 return VINF_SUCCESS;
6496}
6497
6498#ifdef IEM_WITH_SETJMP
6499
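/*
 * Note (illustrative): unlike the functions above, the *Jmp variants below report
 * failure by longjmp-ing back to the instruction dispatcher via IEM_DO_LONGJMP
 * rather than by returning a strict status code, which is why they can hand back
 * the mapped pointer or fetched value directly.
 */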
6500/**
6501 * Maps the specified guest memory for the given kind of access, longjmp on
6502 * error.
6503 *
6504 * This may be using bounce buffering of the memory if it's crossing a page
6505 * boundary or if there is an access handler installed for any of it. Because
6506 * of lock prefix guarantees, we're in for some extra clutter when this
6507 * happens.
6508 *
6509 * This may raise a \#GP, \#SS, \#PF or \#AC.
6510 *
6511 * @returns Pointer to the mapped memory.
6512 *
6513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6514 * @param cbMem The number of bytes to map. This is usually 1,
6515 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6516 * string operations it can be up to a page.
6517 * @param iSegReg The index of the segment register to use for
6518 * this access. The base and limits are checked.
6519 * Use UINT8_MAX to indicate that no segmentation
6520 * is required (for IDT, GDT and LDT accesses).
6521 * @param GCPtrMem The address of the guest memory.
6522 * @param fAccess How the memory is being accessed. The
6523 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6524 * how to map the memory, while the
6525 * IEM_ACCESS_WHAT_XXX bit is used when raising
6526 * exceptions.
6527 * @param uAlignCtl Alignment control:
6528 * - Bits 15:0 is the alignment mask.
6529 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6530 * IEM_MEMMAP_F_ALIGN_SSE, and
6531 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6532 * Pass zero to skip alignment.
6533 */
6534void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6535 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6536{
6537 /*
6538 * Check the input, check segment access and adjust address
6539 * with segment base.
6540 */
6541 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6542 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6543 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6544
6545 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6546 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6547 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6548
6549 /*
6550 * Alignment check.
6551 */
6552 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6553 { /* likelyish */ }
6554 else
6555 {
6556 /* Misaligned access. */
6557 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6558 {
6559 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6560 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6561 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6562 {
6563 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6564
6565 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6566 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6567 }
6568 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6569 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6570 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6571 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6572 * that's what FXSAVE does on a 10980xe. */
6573 && iemMemAreAlignmentChecksEnabled(pVCpu))
6574 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6575 else
6576 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6577 }
6578 }
6579
6580 /*
6581 * Figure out which mapping entry to use.
6582 */
6583 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6584 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6585 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6586 {
6587 iMemMap = iemMemMapFindFree(pVCpu);
6588 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6589 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6590 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6591 pVCpu->iem.s.aMemMappings[2].fAccess),
6592 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6593 }
6594
6595 /*
6596 * Crossing a page boundary?
6597 */
6598 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6599 { /* No (likely). */ }
6600 else
6601 {
6602 void *pvMem;
6603 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6604 if (rcStrict == VINF_SUCCESS)
6605 return pvMem;
6606 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6607 }
6608
6609#ifdef IEM_WITH_DATA_TLB
6610 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6611
6612 /*
6613 * Get the TLB entry for this page.
6614 */
6615 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6616 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6617 if (pTlbe->uTag == uTag)
6618 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6619 else
6620 {
6621 pVCpu->iem.s.DataTlb.cTlbMisses++;
6622 PGMPTWALK Walk;
6623 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6624 if (RT_FAILURE(rc))
6625 {
6626 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6627# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6628 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6629 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6630# endif
6631 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6632 }
6633
6634 Assert(Walk.fSucceeded);
6635 pTlbe->uTag = uTag;
6636 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6637 pTlbe->GCPhys = Walk.GCPhys;
6638 pTlbe->pbMappingR3 = NULL;
6639 }
6640
6641 /*
6642 * Check the flags and physical revision.
6643 */
6644 /** @todo make the caller pass these in with fAccess. */
6645 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6646 ? IEMTLBE_F_PT_NO_USER : 0;
6647 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6648 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6649 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6650 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6651 ? IEMTLBE_F_PT_NO_WRITE : 0)
6652 : 0;
6653 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
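    /* Note (illustrative): the flag bits and the physical revision live in
       disjoint parts of fFlagsAndPhysRev, so the single compare below succeeds
       only when the revision is current and every NO_USER / NO_WRITE / NO_DIRTY /
       NO_ACCESSED / NO_READ bit that matters for this access is clear - one
       branch covers the whole fast path. */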
6654 uint8_t *pbMem = NULL;
6655 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6656 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6657# ifdef IN_RING3
6658 pbMem = pTlbe->pbMappingR3;
6659# else
6660 pbMem = NULL;
6661# endif
6662 else
6663 {
6664 /*
6665 * Okay, something isn't quite right or needs refreshing.
6666 */
6667 /* Write to read only memory? */
6668 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6669 {
6670 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6671# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6674# endif
6675 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6676 }
6677
6678 /* Kernel memory accessed by userland? */
6679 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6680 {
6681 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6682# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6683 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6684 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6685# endif
6686 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6687 }
6688
6689 /* Set the dirty / access flags.
6690 ASSUMES this is set when the address is translated rather than on commit... */
6691 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6692 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6693 {
6694 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6695 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6696 AssertRC(rc2);
6697 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6698 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6699 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6700 }
6701
6702 /*
6703 * Check if the physical page info needs updating.
6704 */
6705 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6706# ifdef IN_RING3
6707 pbMem = pTlbe->pbMappingR3;
6708# else
6709 pbMem = NULL;
6710# endif
6711 else
6712 {
6713 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6714 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6716 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6717 pTlbe->pbMappingR3 = NULL;
6718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6721 &pbMem, &pTlbe->fFlagsAndPhysRev);
6722 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6723# ifdef IN_RING3
6724 pTlbe->pbMappingR3 = pbMem;
6725# endif
6726 }
6727
6728 /*
6729 * Check the physical page level access and mapping.
6730 */
6731 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6732 { /* probably likely */ }
6733 else
6734 {
6735 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6736 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6737 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6738 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6739 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6740 if (rcStrict == VINF_SUCCESS)
6741 return pbMem;
6742 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6743 }
6744 }
6745 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6746
6747 if (pbMem)
6748 {
6749 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6750 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6751 fAccess |= IEM_ACCESS_NOT_LOCKED;
6752 }
6753 else
6754 {
6755 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6756 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6757 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6758 if (rcStrict == VINF_SUCCESS)
6759 return pbMem;
6760 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6761 }
6762
6763 void * const pvMem = pbMem;
6764
6765 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6766 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6767 if (fAccess & IEM_ACCESS_TYPE_READ)
6768 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6769
6770#else /* !IEM_WITH_DATA_TLB */
6771
6772
6773 RTGCPHYS GCPhysFirst;
6774 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6775 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6776 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6777
6778 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6779 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6780 if (fAccess & IEM_ACCESS_TYPE_READ)
6781 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6782
6783 void *pvMem;
6784 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6785 if (rcStrict == VINF_SUCCESS)
6786 { /* likely */ }
6787 else
6788 {
6789 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6790 if (rcStrict == VINF_SUCCESS)
6791 return pvMem;
6792 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6793 }
6794
6795#endif /* !IEM_WITH_DATA_TLB */
6796
6797 /*
6798 * Fill in the mapping table entry.
6799 */
6800 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6801 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6802 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6803 pVCpu->iem.s.cActiveMappings++;
6804
6805 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6806 return pvMem;
6807}
6808
6809
6810/**
6811 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6812 *
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param pvMem The mapping.
6815 * @param fAccess The kind of access.
6816 */
6817void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6818{
6819 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6820 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6821
6822 /* If it's bounce buffered, we may need to write back the buffer. */
6823 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6824 {
6825 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6826 {
6827 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6828 if (rcStrict == VINF_SUCCESS)
6829 return;
6830 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6831 }
6832 }
6833 /* Otherwise unlock it. */
6834 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6835 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6836
6837 /* Free the entry. */
6838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6839 Assert(pVCpu->iem.s.cActiveMappings != 0);
6840 pVCpu->iem.s.cActiveMappings--;
6841}
6842
6843#endif /* IEM_WITH_SETJMP */
6844
6845#ifndef IN_RING3
6846/**
6847 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6848 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
6849 *
6850 * Allows the instruction to be completed and retired, while the IEM user will
6851 * return to ring-3 immediately afterwards and do the postponed writes there.
6852 *
6853 * @returns VBox status code (no strict statuses). Caller must check
6854 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param pvMem The mapping.
6857 * @param fAccess The kind of access.
6858 */
6859VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6860{
6861 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6862 AssertReturn(iMemMap >= 0, iMemMap);
6863
6864 /* If it's bounce buffered, we may need to write back the buffer. */
6865 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6866 {
6867 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6868 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6869 }
6870 /* Otherwise unlock it. */
6871 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6872 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6873
6874 /* Free the entry. */
6875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6876 Assert(pVCpu->iem.s.cActiveMappings != 0);
6877 pVCpu->iem.s.cActiveMappings--;
6878 return VINF_SUCCESS;
6879}
6880#endif
6881
6882
6883/**
6884 * Rolls back mappings, releasing page locks and such.
6885 *
6886 * The caller shall only call this after checking cActiveMappings.
6887 *
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 */
6890void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6891{
6892 Assert(pVCpu->iem.s.cActiveMappings > 0);
6893
6894 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6895 while (iMemMap-- > 0)
6896 {
6897 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6898 if (fAccess != IEM_ACCESS_INVALID)
6899 {
6900 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6902 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6903 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6904 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6905 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6906 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6907 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6908 pVCpu->iem.s.cActiveMappings--;
6909 }
6910 }
6911}
6912
6913
6914/**
6915 * Fetches a data byte.
6916 *
6917 * @returns Strict VBox status code.
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 * @param pu8Dst Where to return the byte.
6920 * @param iSegReg The index of the segment register to use for
6921 * this access. The base and limits are checked.
6922 * @param GCPtrMem The address of the guest memory.
6923 */
6924VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6925{
6926 /* The lazy approach for now... */
6927 uint8_t const *pu8Src;
6928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6929 if (rc == VINF_SUCCESS)
6930 {
6931 *pu8Dst = *pu8Src;
6932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6933 Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, *pu8Dst));
6934 }
6935 return rc;
6936}
6937
6938
6939#ifdef IEM_WITH_SETJMP
6940/**
6941 * Fetches a data byte, longjmp on error.
6942 *
6943 * @returns The byte.
6944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6945 * @param iSegReg The index of the segment register to use for
6946 * this access. The base and limits are checked.
6947 * @param GCPtrMem The address of the guest memory.
6948 */
6949uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6950{
6951 /* The lazy approach for now... */
6952 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6953 uint8_t const bRet = *pu8Src;
6954 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6955 Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, bRet));
6956 return bRet;
6957}
6958#endif /* IEM_WITH_SETJMP */
6959
6960
6961/**
6962 * Fetches a data word.
6963 *
6964 * @returns Strict VBox status code.
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pu16Dst Where to return the word.
6967 * @param iSegReg The index of the segment register to use for
6968 * this access. The base and limits are checked.
6969 * @param GCPtrMem The address of the guest memory.
6970 */
6971VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6972{
6973 /* The lazy approach for now... */
6974 uint16_t const *pu16Src;
6975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6976 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6977 if (rc == VINF_SUCCESS)
6978 {
6979 *pu16Dst = *pu16Src;
6980 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6981 Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, *pu16Dst));
6982 }
6983 return rc;
6984}
6985
6986
6987#ifdef IEM_WITH_SETJMP
6988/**
6989 * Fetches a data word, longjmp on error.
6990 *
6991 * @returns The word
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param iSegReg The index of the segment register to use for
6994 * this access. The base and limits are checked.
6995 * @param GCPtrMem The address of the guest memory.
6996 */
6997uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6998{
6999 /* The lazy approach for now... */
7000 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7001 sizeof(*pu16Src) - 1);
7002 uint16_t const u16Ret = *pu16Src;
7003 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
7004 Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Ret));
7005 return u16Ret;
7006}
7007#endif
7008
7009
7010/**
7011 * Fetches a data dword.
7012 *
7013 * @returns Strict VBox status code.
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 * @param pu32Dst Where to return the dword.
7016 * @param iSegReg The index of the segment register to use for
7017 * this access. The base and limits are checked.
7018 * @param GCPtrMem The address of the guest memory.
7019 */
7020VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7021{
7022 /* The lazy approach for now... */
7023 uint32_t const *pu32Src;
7024 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7025 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7026 if (rc == VINF_SUCCESS)
7027 {
7028 *pu32Dst = *pu32Src;
7029 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7030 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, *pu32Dst));
7031 }
7032 return rc;
7033}
7034
7035
7036/**
7037 * Fetches a data dword and zero extends it to a qword.
7038 *
7039 * @returns Strict VBox status code.
7040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7041 * @param pu64Dst Where to return the qword.
7042 * @param iSegReg The index of the segment register to use for
7043 * this access. The base and limits are checked.
7044 * @param GCPtrMem The address of the guest memory.
7045 */
7046VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7047{
7048 /* The lazy approach for now... */
7049 uint32_t const *pu32Src;
7050 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7051 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7052 if (rc == VINF_SUCCESS)
7053 {
7054 *pu64Dst = *pu32Src;
7055 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7056 Log9(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7057 }
7058 return rc;
7059}
7060
7061
7062#ifdef IEM_WITH_SETJMP
7063
7064/**
7065 * Fetches a data dword, longjmp on error, fallback/safe version.
7066 *
7067 * @returns The dword
7068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7069 * @param iSegReg The index of the segment register to use for
7070 * this access. The base and limits are checked.
7071 * @param GCPtrMem The address of the guest memory.
7072 */
7073uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7074{
7075 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7076 sizeof(*pu32Src) - 1);
7077 uint32_t const u32Ret = *pu32Src;
7078 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7079 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7080 return u32Ret;
7081}
7082
7083
7084/**
7085 * Fetches a data dword, longjmp on error.
7086 *
7087 * @returns The dword
7088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7089 * @param iSegReg The index of the segment register to use for
7090 * this access. The base and limits are checked.
7091 * @param GCPtrMem The address of the guest memory.
7092 */
7093uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7094{
7095# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7096 /*
7097     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7098 */
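    /* Example (illustrative, assuming 4 KiB guest pages): for a 4 byte read the
       check below accepts page offsets 0x000 through 0xffc; anything from 0xffd
       up would spill into the next page and is routed to the safe fallback
       instead. */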
7099 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7100 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7101 {
7102 /*
7103 * TLB lookup.
7104 */
7105 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7106 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7107 if (pTlbe->uTag == uTag)
7108 {
7109 /*
7110 * Check TLB page table level access flags.
7111 */
7112 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
7113 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
7114 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7115 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7116 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7117 {
7118 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7119
7120 /*
7121 * Alignment check:
7122 */
7123 /** @todo check priority \#AC vs \#PF */
7124 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7125 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7126 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7127 || IEM_GET_CPL(pVCpu) != 3)
7128 {
7129 /*
7130 * Fetch and return the dword
7131 */
7132 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7133 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7134 uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7135 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7136 return u32Ret;
7137 }
7138 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7139 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7140 }
7141 }
7142 }
7143
7144 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
7145 outdated page pointer, or other troubles. */
7146 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7147 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7148
7149# else
7150 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7151 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7152 uint32_t const u32Ret = *pu32Src;
7153 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7154 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
7155 return u32Ret;
7156# endif
7157}
7158
7159/**
7160 * Fetches a data dword from a FLAT address, longjmp on error.
7161 *
7162 * @returns The dword
7163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7164 * @param GCPtrMem The address of the guest memory.
7165 */
7166uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7167{
7168# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7169 /*
7170     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7171 */
7172 RTGCPTR GCPtrEff = GCPtrMem;
7173 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7174 {
7175 /*
7176 * TLB lookup.
7177 */
7178 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7179 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7180 if (pTlbe->uTag == uTag)
7181 {
7182 /*
7183 * Check TLB page table level access flags.
7184 */
7185 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
7186 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
7187 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7188 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7189 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7190 {
7191 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7192
7193 /*
7194 * Alignment check:
7195 */
7196 /** @todo check priority \#AC vs \#PF */
7197 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7198 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7199 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7200 || IEM_GET_CPL(pVCpu) != 3)
7201 {
7202 /*
7203 * Fetch and return the dword
7204 */
7205 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7206 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7207 uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7208 Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
7209 return u32Ret;
7210 }
7211 Log10(("iemMemFlatFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7212 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7213 }
7214 }
7215 }
7216
7217 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
7218 outdated page pointer, or other troubles. */
7219 Log10(("iemMemFlatFetchDataU32Jmp: %RGv fallback\n", GCPtrMem));
7220 return iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
7221
7222# else
7223 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), UINT8_MAX, GCPtrMem,
7224 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7225 uint32_t const u32Ret = *pu32Src;
7226 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7227 Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
7228 return u32Ret;
7229# endif
7230}
7231
7232#endif /* IEM_WITH_SETJMP */
7233
7234
7235#ifdef SOME_UNUSED_FUNCTION
7236/**
7237 * Fetches a data dword and sign extends it to a qword.
7238 *
7239 * @returns Strict VBox status code.
7240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7241 * @param pu64Dst Where to return the sign extended value.
7242 * @param iSegReg The index of the segment register to use for
7243 * this access. The base and limits are checked.
7244 * @param GCPtrMem The address of the guest memory.
7245 */
7246VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7247{
7248 /* The lazy approach for now... */
7249 int32_t const *pi32Src;
7250 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7251 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7252 if (rc == VINF_SUCCESS)
7253 {
7254 *pu64Dst = *pi32Src;
7255 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7256 Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7257 }
7258#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7259 else
7260 *pu64Dst = 0;
7261#endif
7262 return rc;
7263}
7264#endif
7265
7266
7267/**
7268 * Fetches a data qword.
7269 *
7270 * @returns Strict VBox status code.
7271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7272 * @param pu64Dst Where to return the qword.
7273 * @param iSegReg The index of the segment register to use for
7274 * this access. The base and limits are checked.
7275 * @param GCPtrMem The address of the guest memory.
7276 */
7277VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7278{
7279 /* The lazy approach for now... */
7280 uint64_t const *pu64Src;
7281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7282 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7283 if (rc == VINF_SUCCESS)
7284 {
7285 *pu64Dst = *pu64Src;
7286 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7287 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7288 }
7289 return rc;
7290}
7291
7292
7293#ifdef IEM_WITH_SETJMP
7294/**
7295 * Fetches a data qword, longjmp on error.
7296 *
7297 * @returns The qword.
7298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7299 * @param iSegReg The index of the segment register to use for
7300 * this access. The base and limits are checked.
7301 * @param GCPtrMem The address of the guest memory.
7302 */
7303uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7304{
7305 /* The lazy approach for now... */
7306 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7307 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7308 uint64_t const u64Ret = *pu64Src;
7309 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7310 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
7311 return u64Ret;
7312}
7313#endif
7314
7315
7316/**
7317 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7318 *
7319 * @returns Strict VBox status code.
7320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7321 * @param pu64Dst Where to return the qword.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 */
7326VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7327{
7328 /* The lazy approach for now... */
7329 uint64_t const *pu64Src;
7330 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7331 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7332 if (rc == VINF_SUCCESS)
7333 {
7334 *pu64Dst = *pu64Src;
7335 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7336 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7337 }
7338 return rc;
7339}
7340
7341
7342#ifdef IEM_WITH_SETJMP
7343/**
7344 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7345 *
7346 * @returns The qword.
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param iSegReg The index of the segment register to use for
7349 * this access. The base and limits are checked.
7350 * @param GCPtrMem The address of the guest memory.
7351 */
7352uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7353{
7354 /* The lazy approach for now... */
7355 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7356 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7357 uint64_t const u64Ret = *pu64Src;
7358 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7359 Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
7360 return u64Ret;
7361}
7362#endif
7363
7364
7365/**
7366 * Fetches a data tword.
7367 *
7368 * @returns Strict VBox status code.
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param pr80Dst Where to return the tword.
7371 * @param iSegReg The index of the segment register to use for
7372 * this access. The base and limits are checked.
7373 * @param GCPtrMem The address of the guest memory.
7374 */
7375VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7376{
7377 /* The lazy approach for now... */
7378 PCRTFLOAT80U pr80Src;
7379 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7380 if (rc == VINF_SUCCESS)
7381 {
7382 *pr80Dst = *pr80Src;
7383 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7384 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7385 }
7386 return rc;
7387}
7388
7389
7390#ifdef IEM_WITH_SETJMP
7391/**
7392 * Fetches a data tword, longjmp on error.
7393 *
7394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7395 * @param pr80Dst Where to return the tword.
7396 * @param iSegReg The index of the segment register to use for
7397 * this access. The base and limits are checked.
7398 * @param GCPtrMem The address of the guest memory.
7399 */
7400void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7401{
7402 /* The lazy approach for now... */
7403 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7404 *pr80Dst = *pr80Src;
7405 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7406 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7407}
7408#endif
7409
7410
7411/**
7412 * Fetches a data decimal tword.
7413 *
7414 * @returns Strict VBox status code.
7415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7416 * @param pd80Dst Where to return the tword.
7417 * @param iSegReg The index of the segment register to use for
7418 * this access. The base and limits are checked.
7419 * @param GCPtrMem The address of the guest memory.
7420 */
7421VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7422{
7423 /* The lazy approach for now... */
7424 PCRTPBCD80U pd80Src;
7425 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7426 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7427 if (rc == VINF_SUCCESS)
7428 {
7429 *pd80Dst = *pd80Src;
7430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7431 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7432 }
7433 return rc;
7434}
7435
7436
7437#ifdef IEM_WITH_SETJMP
7438/**
7439 * Fetches a data decimal tword, longjmp on error.
7440 *
7441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7442 * @param pd80Dst Where to return the tword.
7443 * @param iSegReg The index of the segment register to use for
7444 * this access. The base and limits are checked.
7445 * @param GCPtrMem The address of the guest memory.
7446 */
7447void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7448{
7449 /* The lazy approach for now... */
7450 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7451 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7452 *pd80Dst = *pd80Src;
7453 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7454 Log9(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7455}
7456#endif
7457
7458
7459/**
7460 * Fetches a data dqword (double qword), generally SSE related.
7461 *
7462 * @returns Strict VBox status code.
7463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7464 * @param pu128Dst Where to return the dqword.
7465 * @param iSegReg The index of the segment register to use for
7466 * this access. The base and limits are checked.
7467 * @param GCPtrMem The address of the guest memory.
7468 */
7469VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7470{
7471 /* The lazy approach for now... */
7472 PCRTUINT128U pu128Src;
7473 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7474 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7475 if (rc == VINF_SUCCESS)
7476 {
7477 pu128Dst->au64[0] = pu128Src->au64[0];
7478 pu128Dst->au64[1] = pu128Src->au64[1];
7479 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7480 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7481 }
7482 return rc;
7483}
7484
7485
7486#ifdef IEM_WITH_SETJMP
7487/**
7488 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
7489 *
7490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7491 * @param pu128Dst Where to return the dqword.
7492 * @param iSegReg The index of the segment register to use for
7493 * this access. The base and limits are checked.
7494 * @param GCPtrMem The address of the guest memory.
7495 */
7496void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7497{
7498 /* The lazy approach for now... */
7499 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7500 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7501 pu128Dst->au64[0] = pu128Src->au64[0];
7502 pu128Dst->au64[1] = pu128Src->au64[1];
7503 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7504 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7505}
7506#endif
7507
7508
7509/**
7510 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7511 * related.
7512 *
7513 * Raises \#GP(0) if not aligned.
7514 *
7515 * @returns Strict VBox status code.
7516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7517 * @param pu128Dst Where to return the dqword.
7518 * @param iSegReg The index of the segment register to use for
7519 * this access. The base and limits are checked.
7520 * @param GCPtrMem The address of the guest memory.
7521 */
7522VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7523{
7524 /* The lazy approach for now... */
7525 PCRTUINT128U pu128Src;
7526 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7527 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7528 if (rc == VINF_SUCCESS)
7529 {
7530 pu128Dst->au64[0] = pu128Src->au64[0];
7531 pu128Dst->au64[1] = pu128Src->au64[1];
7532 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7533 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7534 }
7535 return rc;
7536}
7537
7538
7539#ifdef IEM_WITH_SETJMP
7540/**
7541 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7542 * related, longjmp on error.
7543 *
7544 * Raises \#GP(0) if not aligned.
7545 *
7546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7547 * @param pu128Dst Where to return the dqword.
7548 * @param iSegReg The index of the segment register to use for
7549 * this access. The base and limits are checked.
7550 * @param GCPtrMem The address of the guest memory.
7551 */
7552void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7553 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7554{
7555 /* The lazy approach for now... */
7556 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7557 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7558 pu128Dst->au64[0] = pu128Src->au64[0];
7559 pu128Dst->au64[1] = pu128Src->au64[1];
7560 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7561 Log9(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7562}
7563#endif
7564
7565
7566/**
7567 * Fetches a data qqword (quad qword, 256 bits), generally AVX related.
7568 *
7569 * @returns Strict VBox status code.
7570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7571 * @param pu256Dst Where to return the qqword.
7572 * @param iSegReg The index of the segment register to use for
7573 * this access. The base and limits are checked.
7574 * @param GCPtrMem The address of the guest memory.
7575 */
7576VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7577{
7578 /* The lazy approach for now... */
7579 PCRTUINT256U pu256Src;
7580 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7581 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7582 if (rc == VINF_SUCCESS)
7583 {
7584 pu256Dst->au64[0] = pu256Src->au64[0];
7585 pu256Dst->au64[1] = pu256Src->au64[1];
7586 pu256Dst->au64[2] = pu256Src->au64[2];
7587 pu256Dst->au64[3] = pu256Src->au64[3];
7588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7589 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7590 }
7591 return rc;
7592}
7593
7594
7595#ifdef IEM_WITH_SETJMP
7596/**
7597 * Fetches a data qqword (quad qword, 256 bits), generally AVX related, longjmp on error.
7598 *
7599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7600 * @param pu256Dst Where to return the qqword.
7601 * @param iSegReg The index of the segment register to use for
7602 * this access. The base and limits are checked.
7603 * @param GCPtrMem The address of the guest memory.
7604 */
7605void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7606{
7607 /* The lazy approach for now... */
7608 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7609 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7610 pu256Dst->au64[0] = pu256Src->au64[0];
7611 pu256Dst->au64[1] = pu256Src->au64[1];
7612 pu256Dst->au64[2] = pu256Src->au64[2];
7613 pu256Dst->au64[3] = pu256Src->au64[3];
7614 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7615 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7616}
7617#endif
7618
7619
7620/**
7621 * Fetches a data qqword (quad qword, 256 bits) at an aligned address,
7622 * generally AVX related.
7623 *
7624 * Raises \#GP(0) if not aligned.
7625 *
7626 * @returns Strict VBox status code.
7627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7628 * @param pu256Dst Where to return the qqword.
7629 * @param iSegReg The index of the segment register to use for
7630 * this access. The base and limits are checked.
7631 * @param GCPtrMem The address of the guest memory.
7632 */
7633VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7634{
7635 /* The lazy approach for now... */
7636 PCRTUINT256U pu256Src;
7637 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7638 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7639 if (rc == VINF_SUCCESS)
7640 {
7641 pu256Dst->au64[0] = pu256Src->au64[0];
7642 pu256Dst->au64[1] = pu256Src->au64[1];
7643 pu256Dst->au64[2] = pu256Src->au64[2];
7644 pu256Dst->au64[3] = pu256Src->au64[3];
7645 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7646 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7647 }
7648 return rc;
7649}
7650
7651
7652#ifdef IEM_WITH_SETJMP
7653/**
7654 * Fetches a data qqword (quad qword, 256 bits) at an aligned address,
7655 * generally AVX related, longjmp on error.
7656 *
7657 * Raises \#GP(0) if not aligned.
7658 *
7659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7660 * @param pu256Dst Where to return the qqword.
7661 * @param iSegReg The index of the segment register to use for
7662 * this access. The base and limits are checked.
7663 * @param GCPtrMem The address of the guest memory.
7664 */
7665void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7666 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7667{
7668 /* The lazy approach for now... */
7669 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7670 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7671 pu256Dst->au64[0] = pu256Src->au64[0];
7672 pu256Dst->au64[1] = pu256Src->au64[1];
7673 pu256Dst->au64[2] = pu256Src->au64[2];
7674 pu256Dst->au64[3] = pu256Src->au64[3];
7675 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7676 Log9(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7677}
7678#endif
7679
7680
7681
7682/**
7683 * Fetches a descriptor register (lgdt, lidt).
7684 *
7685 * @returns Strict VBox status code.
7686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7687 * @param pcbLimit Where to return the limit.
7688 * @param pGCPtrBase Where to return the base.
7689 * @param iSegReg The index of the segment register to use for
7690 * this access. The base and limits are checked.
7691 * @param GCPtrMem The address of the guest memory.
7692 * @param enmOpSize The effective operand size.
7693 */
7694VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7695 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7696{
7697 /*
7698 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7699 * little special:
7700 * - The two reads are done separately.
7701 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7702 * - We suspect the 386 to actually commit the limit before the base in
7703 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7704 * don't try to emulate this eccentric behavior, because it's not well
7705 * enough understood and rather hard to trigger.
7706 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7707 */
7708 VBOXSTRICTRC rcStrict;
7709 if (IEM_IS_64BIT_CODE(pVCpu))
7710 {
7711 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7712 if (rcStrict == VINF_SUCCESS)
7713 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7714 }
7715 else
7716 {
7717 uint32_t uTmp = 0; /* (Silences Visual C++'s maybe-used-uninitialized warning.) */
7718 if (enmOpSize == IEMMODE_32BIT)
7719 {
7720 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7721 {
7722 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7723 if (rcStrict == VINF_SUCCESS)
7724 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7725 }
7726 else
7727 {
7728 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7729 if (rcStrict == VINF_SUCCESS)
7730 {
7731 *pcbLimit = (uint16_t)uTmp;
7732 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7733 }
7734 }
7735 if (rcStrict == VINF_SUCCESS)
7736 *pGCPtrBase = uTmp;
7737 }
7738 else
7739 {
7740 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7741 if (rcStrict == VINF_SUCCESS)
7742 {
7743 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7744 if (rcStrict == VINF_SUCCESS)
7745 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7746 }
7747 }
7748 }
7749 return rcStrict;
7750}
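/*
 * A minimal usage sketch (hypothetical caller, not the actual LIDT/LGDT
 * implementation; iEffSeg, GCPtrEffSrc and enmEffOpSize are assumed caller
 * variables) showing how the limit and base come back from the helper above.
 * Note that with a 16-bit operand size only the low 24 bits of the base are
 * returned, as per the masking above.
 */
#if 0 /* illustrative only */
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... validate cbLimit/GCPtrBase and load the descriptor table register ... */
    }
    return rcStrict;
#endif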
7751
7752
7753
7754/**
7755 * Stores a data byte.
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7759 * @param iSegReg The index of the segment register to use for
7760 * this access. The base and limits are checked.
7761 * @param GCPtrMem The address of the guest memory.
7762 * @param u8Value The value to store.
7763 */
7764VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7765{
7766 /* The lazy approach for now... */
7767 uint8_t *pu8Dst;
7768 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7769 if (rc == VINF_SUCCESS)
7770 {
7771 *pu8Dst = u8Value;
7772 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7773 Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
7774 }
7775 return rc;
7776}
7777
7778
7779#ifdef IEM_WITH_SETJMP
7780/**
7781 * Stores a data byte, longjmp on error.
7782 *
7783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7784 * @param iSegReg The index of the segment register to use for
7785 * this access. The base and limits are checked.
7786 * @param GCPtrMem The address of the guest memory.
7787 * @param u8Value The value to store.
7788 */
7789void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7790{
7791 /* The lazy approach for now... */
7792 Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
7793 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7794 *pu8Dst = u8Value;
7795 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7796}
7797#endif
7798
7799
7800/**
7801 * Stores a data word.
7802 *
7803 * @returns Strict VBox status code.
7804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7805 * @param iSegReg The index of the segment register to use for
7806 * this access. The base and limits are checked.
7807 * @param GCPtrMem The address of the guest memory.
7808 * @param u16Value The value to store.
7809 */
7810VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7811{
7812 /* The lazy approach for now... */
7813 uint16_t *pu16Dst;
7814 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7815 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7816 if (rc == VINF_SUCCESS)
7817 {
7818 *pu16Dst = u16Value;
7819 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7820 Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
7821 }
7822 return rc;
7823}
7824
7825
7826#ifdef IEM_WITH_SETJMP
7827/**
7828 * Stores a data word, longjmp on error.
7829 *
7830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7831 * @param iSegReg The index of the segment register to use for
7832 * this access. The base and limits are checked.
7833 * @param GCPtrMem The address of the guest memory.
7834 * @param u16Value The value to store.
7835 */
7836void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7837{
7838 /* The lazy approach for now... */
7839 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7840 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7841 *pu16Dst = u16Value;
7842 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7843 Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
7844}
7845#endif
7846
7847
7848/**
7849 * Stores a data dword.
7850 *
7851 * @returns Strict VBox status code.
7852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7853 * @param iSegReg The index of the segment register to use for
7854 * this access. The base and limits are checked.
7855 * @param GCPtrMem The address of the guest memory.
7856 * @param u32Value The value to store.
7857 */
7858VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7859{
7860 /* The lazy approach for now... */
7861 uint32_t *pu32Dst;
7862 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7863 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7864 if (rc == VINF_SUCCESS)
7865 {
7866 *pu32Dst = u32Value;
7867 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7868 Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
7869 }
7870 return rc;
7871}
7872
7873
7874#ifdef IEM_WITH_SETJMP
7875/**
7876 * Stores a data dword, longjmp on error.
7877 *
7879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7880 * @param iSegReg The index of the segment register to use for
7881 * this access. The base and limits are checked.
7882 * @param GCPtrMem The address of the guest memory.
7883 * @param u32Value The value to store.
7884 */
7885void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7886{
7887 /* The lazy approach for now... */
7888 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7889 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7890 *pu32Dst = u32Value;
7891 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7892 Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
7893}
7894#endif
7895
7896
7897/**
7898 * Stores a data qword.
7899 *
7900 * @returns Strict VBox status code.
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param iSegReg The index of the segment register to use for
7903 * this access. The base and limits are checked.
7904 * @param GCPtrMem The address of the guest memory.
7905 * @param u64Value The value to store.
7906 */
7907VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7908{
7909 /* The lazy approach for now... */
7910 uint64_t *pu64Dst;
7911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7912 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7913 if (rc == VINF_SUCCESS)
7914 {
7915 *pu64Dst = u64Value;
7916 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7917 Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
7918 }
7919 return rc;
7920}
7921
7922
7923#ifdef IEM_WITH_SETJMP
7924/**
7925 * Stores a data qword, longjmp on error.
7926 *
7927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7928 * @param iSegReg The index of the segment register to use for
7929 * this access. The base and limits are checked.
7930 * @param GCPtrMem The address of the guest memory.
7931 * @param u64Value The value to store.
7932 */
7933void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7934{
7935 /* The lazy approach for now... */
7936 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7937 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7938 *pu64Dst = u64Value;
7939 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7940 Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
7941}
7942#endif
7943
7944
7945/**
7946 * Stores a data dqword.
7947 *
7948 * @returns Strict VBox status code.
7949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7950 * @param iSegReg The index of the segment register to use for
7951 * this access. The base and limits are checked.
7952 * @param GCPtrMem The address of the guest memory.
7953 * @param u128Value The value to store.
7954 */
7955VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7956{
7957 /* The lazy approach for now... */
7958 PRTUINT128U pu128Dst;
7959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7960 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7961 if (rc == VINF_SUCCESS)
7962 {
7963 pu128Dst->au64[0] = u128Value.au64[0];
7964 pu128Dst->au64[1] = u128Value.au64[1];
7965 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7966 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7967 }
7968 return rc;
7969}
7970
7971
7972#ifdef IEM_WITH_SETJMP
7973/**
7974 * Stores a data dqword, longjmp on error.
7975 *
7976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7977 * @param iSegReg The index of the segment register to use for
7978 * this access. The base and limits are checked.
7979 * @param GCPtrMem The address of the guest memory.
7980 * @param u128Value The value to store.
7981 */
7982void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7983{
7984 /* The lazy approach for now... */
7985 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7986 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7987 pu128Dst->au64[0] = u128Value.au64[0];
7988 pu128Dst->au64[1] = u128Value.au64[1];
7989 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7990 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7991}
7992#endif
7993
7994
7995/**
7996 * Stores a data dqword, SSE aligned.
7997 *
7998 * @returns Strict VBox status code.
7999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8000 * @param iSegReg The index of the segment register to use for
8001 * this access. The base and limits are checked.
8002 * @param GCPtrMem The address of the guest memory.
8003 * @param u128Value The value to store.
8004 */
8005VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
8006{
8007 /* The lazy approach for now... */
8008 PRTUINT128U pu128Dst;
8009 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8010 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8011 if (rc == VINF_SUCCESS)
8012 {
8013 pu128Dst->au64[0] = u128Value.au64[0];
8014 pu128Dst->au64[1] = u128Value.au64[1];
8015 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
8016 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8017 }
8018 return rc;
8019}
8020
8021
8022#ifdef IEM_WITH_SETJMP
8023/**
8024 * Stores a data dqword, SSE aligned, longjmp on error.
8025 *
8027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8028 * @param iSegReg The index of the segment register to use for
8029 * this access. The base and limits are checked.
8030 * @param GCPtrMem The address of the guest memory.
8031 * @param u128Value The value to store.
8032 */
8033void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8034 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
8035{
8036 /* The lazy approach for now... */
8037 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
8038 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
8039 pu128Dst->au64[0] = u128Value.au64[0];
8040 pu128Dst->au64[1] = u128Value.au64[1];
8041 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
8042 Log8(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
8043}
8044#endif
8045
8046
8047/**
8048 * Stores a data qqword (256 bits).
8049 *
8050 * @returns Strict VBox status code.
8051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8052 * @param iSegReg The index of the segment register to use for
8053 * this access. The base and limits are checked.
8054 * @param GCPtrMem The address of the guest memory.
8055 * @param pu256Value Pointer to the value to store.
8056 */
8057VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8058{
8059 /* The lazy approach for now... */
8060 PRTUINT256U pu256Dst;
8061 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8062 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8063 if (rc == VINF_SUCCESS)
8064 {
8065 pu256Dst->au64[0] = pu256Value->au64[0];
8066 pu256Dst->au64[1] = pu256Value->au64[1];
8067 pu256Dst->au64[2] = pu256Value->au64[2];
8068 pu256Dst->au64[3] = pu256Value->au64[3];
8069 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8070 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8071 }
8072 return rc;
8073}
8074
8075
8076#ifdef IEM_WITH_SETJMP
8077/**
8078 * Stores a data qqword (256 bits), longjmp on error.
8079 *
8080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8081 * @param iSegReg The index of the segment register to use for
8082 * this access. The base and limits are checked.
8083 * @param GCPtrMem The address of the guest memory.
8084 * @param pu256Value Pointer to the value to store.
8085 */
8086void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8087{
8088 /* The lazy approach for now... */
8089 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8090 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
8091 pu256Dst->au64[0] = pu256Value->au64[0];
8092 pu256Dst->au64[1] = pu256Value->au64[1];
8093 pu256Dst->au64[2] = pu256Value->au64[2];
8094 pu256Dst->au64[3] = pu256Value->au64[3];
8095 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8096 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8097}
8098#endif
8099
8100
8101/**
8102 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
8103 *
8104 * @returns Strict VBox status code.
8105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8106 * @param iSegReg The index of the segment register to use for
8107 * this access. The base and limits are checked.
8108 * @param GCPtrMem The address of the guest memory.
8109 * @param pu256Value Pointer to the value to store.
8110 */
8111VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
8112{
8113 /* The lazy approach for now... */
8114 PRTUINT256U pu256Dst;
8115 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8116 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8117 if (rc == VINF_SUCCESS)
8118 {
8119 pu256Dst->au64[0] = pu256Value->au64[0];
8120 pu256Dst->au64[1] = pu256Value->au64[1];
8121 pu256Dst->au64[2] = pu256Value->au64[2];
8122 pu256Dst->au64[3] = pu256Value->au64[3];
8123 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8124 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8125 }
8126 return rc;
8127}
8128
8129
8130#ifdef IEM_WITH_SETJMP
8131/**
8132 * Stores a data qqword (256 bits), AVX \#GP(0) aligned, longjmp on error.
8133 *
8135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8136 * @param iSegReg The index of the segment register to use for
8137 * this access. The base and limits are checked.
8138 * @param GCPtrMem The address of the guest memory.
8139 * @param pu256Value Pointer to the value to store.
8140 */
8141void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8142 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8143{
8144 /* The lazy approach for now... */
8145 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8146 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8147 pu256Dst->au64[0] = pu256Value->au64[0];
8148 pu256Dst->au64[1] = pu256Value->au64[1];
8149 pu256Dst->au64[2] = pu256Value->au64[2];
8150 pu256Dst->au64[3] = pu256Value->au64[3];
8151 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8152 Log8(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
8153}
8154#endif
8155
8156
8157/**
8158 * Stores a descriptor register (sgdt, sidt).
8159 *
8160 * @returns Strict VBox status code.
8161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8162 * @param cbLimit The limit.
8163 * @param GCPtrBase The base address.
8164 * @param iSegReg The index of the segment register to use for
8165 * this access. The base and limits are checked.
8166 * @param GCPtrMem The address of the guest memory.
8167 */
8168VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8169{
8170 /*
8171 * The SIDT and SGDT instructions actually store the data using two
8172 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8173 * do not respond to opsize prefixes.
8174 */
8175 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8176 if (rcStrict == VINF_SUCCESS)
8177 {
8178 if (IEM_IS_16BIT_CODE(pVCpu))
8179 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8180 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8181 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8182 else if (IEM_IS_32BIT_CODE(pVCpu))
8183 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8184 else
8185 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8186 }
8187 return rcStrict;
8188}
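/*
 * Worked example for the 16-bit/286 path above: a 286 only has a 24-bit
 * descriptor table base, so for that target the top byte of the stored 32-bit
 * base field is forced to 0xff, e.g. a base of 0x00123456 is written out as
 * 0xff123456.  Later targets store the full 32-bit base, and 64-bit code
 * stores the full 64-bit base.
 */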
8189
8190
8191/**
8192 * Pushes a word onto the stack.
8193 *
8194 * @returns Strict VBox status code.
8195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8196 * @param u16Value The value to push.
8197 */
8198VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8199{
8200 /* Decrement the stack pointer. */
8201 uint64_t uNewRsp;
8202 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8203
8204 /* Write the word the lazy way. */
8205 uint16_t *pu16Dst;
8206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8207 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8208 if (rc == VINF_SUCCESS)
8209 {
8210 *pu16Dst = u16Value;
8211 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8212 }
8213
8214 /* Commit the new RSP value unless an access handler made trouble. */
8215 if (rc == VINF_SUCCESS)
8216 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8217
8218 return rc;
8219}
8220
8221
8222/**
8223 * Pushes a dword onto the stack.
8224 *
8225 * @returns Strict VBox status code.
8226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8227 * @param u32Value The value to push.
8228 */
8229VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8230{
8231 /* Decrement the stack pointer. */
8232 uint64_t uNewRsp;
8233 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8234
8235 /* Write the dword the lazy way. */
8236 uint32_t *pu32Dst;
8237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8238 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8239 if (rc == VINF_SUCCESS)
8240 {
8241 *pu32Dst = u32Value;
8242 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8243 }
8244
8245 /* Commit the new RSP value unless an access handler made trouble. */
8246 if (rc == VINF_SUCCESS)
8247 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8248
8249 return rc;
8250}
8251
8252
8253/**
8254 * Pushes a dword segment register value onto the stack.
8255 *
8256 * @returns Strict VBox status code.
8257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8258 * @param u32Value The value to push.
8259 */
8260VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8261{
8262 /* Decrement the stack pointer. */
8263 uint64_t uNewRsp;
8264 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8265
8266 /* The Intel docs talk about zero extending the selector register
8267 value. My actual Intel CPU here might be zero extending the value,
8268 but it still only writes the lower word... */
8269 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8270 * happens when crossing an electric page boundary, is the high word checked
8271 * for write accessibility or not? Probably it is. What about segment limits?
8272 * It appears this behavior is also shared with trap error codes.
8273 *
8274 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8275 * ancient hardware when it actually did change. */
8276 uint16_t *pu16Dst;
8277 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8278 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8279 if (rc == VINF_SUCCESS)
8280 {
8281 *pu16Dst = (uint16_t)u32Value;
8282 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8283 }
8284
8285 /* Commit the new RSP value unless an access handler made trouble. */
8286 if (rc == VINF_SUCCESS)
8287 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8288
8289 return rc;
8290}
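/*
 * Illustration of the partial write above: a 32-bit push of a segment register
 * still moves the stack pointer by 4 and maps the slot as a 4 byte read-write
 * access, but only the low word is written.  E.g. pushing FS=0x0063 onto a
 * dword slot currently holding 0xdeadbeef leaves 0xdead0063 behind - the high
 * word keeps whatever was on the stack before.
 */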
8291
8292
8293/**
8294 * Pushes a qword onto the stack.
8295 *
8296 * @returns Strict VBox status code.
8297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8298 * @param u64Value The value to push.
8299 */
8300VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8301{
8302 /* Decrement the stack pointer. */
8303 uint64_t uNewRsp;
8304 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8305
8306 /* Write the qword the lazy way. */
8307 uint64_t *pu64Dst;
8308 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8309 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8310 if (rc == VINF_SUCCESS)
8311 {
8312 *pu64Dst = u64Value;
8313 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8314 }
8315
8316 /* Commit the new RSP value unless an access handler made trouble. */
8317 if (rc == VINF_SUCCESS)
8318 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8319
8320 return rc;
8321}
8322
8323
8324/**
8325 * Pops a word from the stack.
8326 *
8327 * @returns Strict VBox status code.
8328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8329 * @param pu16Value Where to store the popped value.
8330 */
8331VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8332{
8333 /* Increment the stack pointer. */
8334 uint64_t uNewRsp;
8335 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8336
8337 /* Read the word the lazy way. */
8338 uint16_t const *pu16Src;
8339 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8340 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8341 if (rc == VINF_SUCCESS)
8342 {
8343 *pu16Value = *pu16Src;
8344 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8345
8346 /* Commit the new RSP value. */
8347 if (rc == VINF_SUCCESS)
8348 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8349 }
8350
8351 return rc;
8352}
8353
8354
8355/**
8356 * Pops a dword from the stack.
8357 *
8358 * @returns Strict VBox status code.
8359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8360 * @param pu32Value Where to store the popped value.
8361 */
8362VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8363{
8364 /* Increment the stack pointer. */
8365 uint64_t uNewRsp;
8366 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8367
8368 /* Read the dword the lazy way. */
8369 uint32_t const *pu32Src;
8370 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8371 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8372 if (rc == VINF_SUCCESS)
8373 {
8374 *pu32Value = *pu32Src;
8375 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8376
8377 /* Commit the new RSP value. */
8378 if (rc == VINF_SUCCESS)
8379 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8380 }
8381
8382 return rc;
8383}
8384
8385
8386/**
8387 * Pops a qword from the stack.
8388 *
8389 * @returns Strict VBox status code.
8390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8391 * @param pu64Value Where to store the popped value.
8392 */
8393VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8394{
8395 /* Increment the stack pointer. */
8396 uint64_t uNewRsp;
8397 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8398
8399 /* Read the qword the lazy way. */
8400 uint64_t const *pu64Src;
8401 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8402 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8403 if (rc == VINF_SUCCESS)
8404 {
8405 *pu64Value = *pu64Src;
8406 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8407
8408 /* Commit the new RSP value. */
8409 if (rc == VINF_SUCCESS)
8410 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8411 }
8412
8413 return rc;
8414}
8415
8416
8417/**
8418 * Pushes a word onto the stack, using a temporary stack pointer.
8419 *
8420 * @returns Strict VBox status code.
8421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8422 * @param u16Value The value to push.
8423 * @param pTmpRsp Pointer to the temporary stack pointer.
8424 */
8425VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8426{
8427 /* Decrement the stack pointer. */
8428 RTUINT64U NewRsp = *pTmpRsp;
8429 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8430
8431 /* Write the word the lazy way. */
8432 uint16_t *pu16Dst;
8433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8434 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8435 if (rc == VINF_SUCCESS)
8436 {
8437 *pu16Dst = u16Value;
8438 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8439 }
8440
8441 /* Commit the new RSP value unless an access handler made trouble. */
8442 if (rc == VINF_SUCCESS)
8443 *pTmpRsp = NewRsp;
8444
8445 return rc;
8446}
8447
8448
8449/**
8450 * Pushes a dword onto the stack, using a temporary stack pointer.
8451 *
8452 * @returns Strict VBox status code.
8453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8454 * @param u32Value The value to push.
8455 * @param pTmpRsp Pointer to the temporary stack pointer.
8456 */
8457VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8458{
8459 /* Decrement the stack pointer. */
8460 RTUINT64U NewRsp = *pTmpRsp;
8461 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8462
8463 /* Write the dword the lazy way. */
8464 uint32_t *pu32Dst;
8465 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8466 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8467 if (rc == VINF_SUCCESS)
8468 {
8469 *pu32Dst = u32Value;
8470 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8471 }
8472
8473 /* Commit the new RSP value unless an access handler made trouble. */
8474 if (rc == VINF_SUCCESS)
8475 *pTmpRsp = NewRsp;
8476
8477 return rc;
8478}
8479
8480
8481/**
8482 * Pushes a qword onto the stack, using a temporary stack pointer.
8483 *
8484 * @returns Strict VBox status code.
8485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8486 * @param u64Value The value to push.
8487 * @param pTmpRsp Pointer to the temporary stack pointer.
8488 */
8489VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8490{
8491 /* Decrement the stack pointer. */
8492 RTUINT64U NewRsp = *pTmpRsp;
8493 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8494
8495 /* Write the qword the lazy way. */
8496 uint64_t *pu64Dst;
8497 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8498 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8499 if (rc == VINF_SUCCESS)
8500 {
8501 *pu64Dst = u64Value;
8502 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8503 }
8504
8505 /* Commit the new RSP value unless an access handler made trouble. */
8506 if (rc == VINF_SUCCESS)
8507 *pTmpRsp = NewRsp;
8508
8509 return rc;
8510}
8511
8512
8513/**
8514 * Pops a word from the stack, using a temporary stack pointer.
8515 *
8516 * @returns Strict VBox status code.
8517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8518 * @param pu16Value Where to store the popped value.
8519 * @param pTmpRsp Pointer to the temporary stack pointer.
8520 */
8521VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8522{
8523 /* Increment the stack pointer. */
8524 RTUINT64U NewRsp = *pTmpRsp;
8525 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8526
8527 /* Read the word the lazy way. */
8528 uint16_t const *pu16Src;
8529 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8530 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8531 if (rc == VINF_SUCCESS)
8532 {
8533 *pu16Value = *pu16Src;
8534 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8535
8536 /* Commit the new RSP value. */
8537 if (rc == VINF_SUCCESS)
8538 *pTmpRsp = NewRsp;
8539 }
8540
8541 return rc;
8542}
8543
8544
8545/**
8546 * Pops a dword from the stack, using a temporary stack pointer.
8547 *
8548 * @returns Strict VBox status code.
8549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8550 * @param pu32Value Where to store the popped value.
8551 * @param pTmpRsp Pointer to the temporary stack pointer.
8552 */
8553VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8554{
8555 /* Increment the stack pointer. */
8556 RTUINT64U NewRsp = *pTmpRsp;
8557 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8558
8559 /* Read the dword the lazy way. */
8560 uint32_t const *pu32Src;
8561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8562 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8563 if (rc == VINF_SUCCESS)
8564 {
8565 *pu32Value = *pu32Src;
8566 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8567
8568 /* Commit the new RSP value. */
8569 if (rc == VINF_SUCCESS)
8570 *pTmpRsp = NewRsp;
8571 }
8572
8573 return rc;
8574}
8575
8576
8577/**
8578 * Pops a qword from the stack, using a temporary stack pointer.
8579 *
8580 * @returns Strict VBox status code.
8581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8582 * @param pu64Value Where to store the popped value.
8583 * @param pTmpRsp Pointer to the temporary stack pointer.
8584 */
8585VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8586{
8587 /* Increment the stack pointer. */
8588 RTUINT64U NewRsp = *pTmpRsp;
8589 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8590
8591 /* Read the qword the lazy way. */
8592 uint64_t const *pu64Src;
8593 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8594 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8595 if (rcStrict == VINF_SUCCESS)
8596 {
8597 *pu64Value = *pu64Src;
8598 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8599
8600 /* Commit the new RSP value. */
8601 if (rcStrict == VINF_SUCCESS)
8602 *pTmpRsp = NewRsp;
8603 }
8604
8605 return rcStrict;
8606}
8607
8608
8609/**
8610 * Begin a special stack push (used by interrupts, exceptions and such).
8611 *
8612 * This will raise \#SS or \#PF if appropriate.
8613 *
8614 * @returns Strict VBox status code.
8615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8616 * @param cbMem The number of bytes to push onto the stack.
8617 * @param cbAlign The alignment mask (7, 3, 1).
8618 * @param ppvMem Where to return the pointer to the stack memory.
8619 * As with the other memory functions this could be
8620 * direct access or bounce buffered access, so
8621 * don't commit registers until the commit call
8622 * succeeds.
8623 * @param puNewRsp Where to return the new RSP value. This must be
8624 * passed unchanged to
8625 * iemMemStackPushCommitSpecial().
8626 */
8627VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8628 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8629{
8630 Assert(cbMem < UINT8_MAX);
8631 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8632 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8633 IEM_ACCESS_STACK_W, cbAlign);
8634}
8635
8636
8637/**
8638 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8639 *
8640 * This will update the rSP.
8641 *
8642 * @returns Strict VBox status code.
8643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8644 * @param pvMem The pointer returned by
8645 * iemMemStackPushBeginSpecial().
8646 * @param uNewRsp The new RSP value returned by
8647 * iemMemStackPushBeginSpecial().
8648 */
8649VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8650{
8651 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8652 if (rcStrict == VINF_SUCCESS)
8653 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8654 return rcStrict;
8655}
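/*
 * A minimal sketch (hypothetical caller such as an exception dispatcher;
 * uValueToPush is an assumed caller variable) of the special push protocol
 * implemented by the two functions above: neither the memory nor RSP is
 * committed until the commit call succeeds.
 */
#if 0 /* illustrative only */
    uint64_t *pu64Frame;    /* example: an 8 byte stack frame */
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, (void **)&pu64Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu64Frame = uValueToPush;                                      /* fill the (possibly bounce buffered) frame */
    return iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp); /* commits the memory and then RSP */
#endif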
8656
8657
8658/**
8659 * Begin a special stack pop (used by iret, retf and such).
8660 *
8661 * This will raise \#SS or \#PF if appropriate.
8662 *
8663 * @returns Strict VBox status code.
8664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8665 * @param cbMem The number of bytes to pop from the stack.
8666 * @param cbAlign The alignment mask (7, 3, 1).
8667 * @param ppvMem Where to return the pointer to the stack memory.
8668 * @param puNewRsp Where to return the new RSP value. This must be
8669 * assigned to CPUMCTX::rsp manually some time
8670 * after iemMemStackPopDoneSpecial() has been
8671 * called.
8672 */
8673VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8674 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8675{
8676 Assert(cbMem < UINT8_MAX);
8677 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8678 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8679}
8680
8681
8682/**
8683 * Continue a special stack pop (used by iret and retf), for the purpose of
8684 * retrieving a new stack pointer.
8685 *
8686 * This will raise \#SS or \#PF if appropriate.
8687 *
8688 * @returns Strict VBox status code.
8689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8690 * @param off Offset from the top of the stack. This is zero
8691 * except in the retf case.
8692 * @param cbMem The number of bytes to pop from the stack.
8693 * @param ppvMem Where to return the pointer to the stack memory.
8694 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8695 * return this because all use of this function is
8696 * to retrieve a new value and anything we return
8697 * here would be discarded.)
8698 */
8699VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8700 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8701{
8702 Assert(cbMem < UINT8_MAX);
8703
8704 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8705 RTGCPTR GCPtrTop;
8706 if (IEM_IS_64BIT_CODE(pVCpu))
8707 GCPtrTop = uCurNewRsp;
8708 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8709 GCPtrTop = (uint32_t)uCurNewRsp;
8710 else
8711 GCPtrTop = (uint16_t)uCurNewRsp;
8712
8713 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8714 0 /* checked in iemMemStackPopBeginSpecial */);
8715}
8716
8717
8718/**
8719 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8720 * iemMemStackPopContinueSpecial).
8721 *
8722 * The caller will manually commit the rSP.
8723 *
8724 * @returns Strict VBox status code.
8725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8726 * @param pvMem The pointer returned by
8727 * iemMemStackPopBeginSpecial() or
8728 * iemMemStackPopContinueSpecial().
8729 */
8730VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8731{
8732 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8733}
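/*
 * A minimal sketch (hypothetical caller along the lines of a RETF/IRET
 * implementation) of the special pop protocol above: read via Begin (and
 * possibly Continue), release with Done, and only then commit RSP manually,
 * as required by the function documentation.
 */
#if 0 /* illustrative only */
    uint64_t const *pu64Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, (void const **)&pu64Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint64_t const uValue = *pu64Frame;                 /* consume the popped data */
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;               /* the caller commits RSP itself */
    /* ... use uValue ... */
    return rcStrict;
#endif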
8734
8735
8736/**
8737 * Fetches a system table byte.
8738 *
8739 * @returns Strict VBox status code.
8740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8741 * @param pbDst Where to return the byte.
8742 * @param iSegReg The index of the segment register to use for
8743 * this access. The base and limits are checked.
8744 * @param GCPtrMem The address of the guest memory.
8745 */
8746VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8747{
8748 /* The lazy approach for now... */
8749 uint8_t const *pbSrc;
8750 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8751 if (rc == VINF_SUCCESS)
8752 {
8753 *pbDst = *pbSrc;
8754 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8755 }
8756 return rc;
8757}
8758
8759
8760/**
8761 * Fetches a system table word.
8762 *
8763 * @returns Strict VBox status code.
8764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8765 * @param pu16Dst Where to return the word.
8766 * @param iSegReg The index of the segment register to use for
8767 * this access. The base and limits are checked.
8768 * @param GCPtrMem The address of the guest memory.
8769 */
8770VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8771{
8772 /* The lazy approach for now... */
8773 uint16_t const *pu16Src;
8774 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8775 if (rc == VINF_SUCCESS)
8776 {
8777 *pu16Dst = *pu16Src;
8778 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8779 }
8780 return rc;
8781}
8782
8783
8784/**
8785 * Fetches a system table dword.
8786 *
8787 * @returns Strict VBox status code.
8788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8789 * @param pu32Dst Where to return the dword.
8790 * @param iSegReg The index of the segment register to use for
8791 * this access. The base and limits are checked.
8792 * @param GCPtrMem The address of the guest memory.
8793 */
8794VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8795{
8796 /* The lazy approach for now... */
8797 uint32_t const *pu32Src;
8798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8799 if (rc == VINF_SUCCESS)
8800 {
8801 *pu32Dst = *pu32Src;
8802 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8803 }
8804 return rc;
8805}
8806
8807
8808/**
8809 * Fetches a system table qword.
8810 *
8811 * @returns Strict VBox status code.
8812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8813 * @param pu64Dst Where to return the qword.
8814 * @param iSegReg The index of the segment register to use for
8815 * this access. The base and limits are checked.
8816 * @param GCPtrMem The address of the guest memory.
8817 */
8818VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8819{
8820 /* The lazy approach for now... */
8821 uint64_t const *pu64Src;
8822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8823 if (rc == VINF_SUCCESS)
8824 {
8825 *pu64Dst = *pu64Src;
8826 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8827 }
8828 return rc;
8829}
8830
8831
8832/**
8833 * Fetches a descriptor table entry with caller specified error code.
8834 *
8835 * @returns Strict VBox status code.
8836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8837 * @param pDesc Where to return the descriptor table entry.
8838 * @param uSel The selector whose table entry to fetch.
8839 * @param uXcpt The exception to raise on table lookup error.
8840 * @param uErrorCode The error code associated with the exception.
8841 */
8842static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8843 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8844{
8845 AssertPtr(pDesc);
8846 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8847
8848 /** @todo did the 286 require all 8 bytes to be accessible? */
8849 /*
8850 * Get the selector table base and check bounds.
8851 */
8852 RTGCPTR GCPtrBase;
8853 if (uSel & X86_SEL_LDT)
8854 {
8855 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8856 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8857 {
8858 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8859 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8860 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8861 uErrorCode, 0);
8862 }
8863
8864 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8865 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8866 }
8867 else
8868 {
8869 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8870 {
8871 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8872 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8873 uErrorCode, 0);
8874 }
8875 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8876 }
8877
8878 /*
8879 * Read the legacy descriptor and maybe the long mode extensions if
8880 * required.
8881 */
8882 VBOXSTRICTRC rcStrict;
8883 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8884 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8885 else
8886 {
8887 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8888 if (rcStrict == VINF_SUCCESS)
8889 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8890 if (rcStrict == VINF_SUCCESS)
8891 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8892 if (rcStrict == VINF_SUCCESS)
8893 pDesc->Legacy.au16[3] = 0;
8894 else
8895 return rcStrict;
8896 }
8897
8898 if (rcStrict == VINF_SUCCESS)
8899 {
8900 if ( !IEM_IS_LONG_MODE(pVCpu)
8901 || pDesc->Legacy.Gen.u1DescType)
8902 pDesc->Long.au64[1] = 0;
8903 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8904 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8905 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8906 else
8907 {
8908 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8909 /** @todo is this the right exception? */
8910 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8911 }
8912 }
8913 return rcStrict;
8914}
8915
8916
8917/**
8918 * Fetches a descriptor table entry.
8919 *
8920 * @returns Strict VBox status code.
8921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8922 * @param pDesc Where to return the descriptor table entry.
8923 * @param uSel The selector which table entry to fetch.
8924 * @param uXcpt The exception to raise on table lookup error.
8925 */
8926VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8927{
8928 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8929}
8930
8931
8932/**
8933 * Marks the selector descriptor as accessed (only non-system descriptors).
8934 *
8935 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8936 * will therefore skip the limit checks.
8937 *
8938 * @returns Strict VBox status code.
8939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8940 * @param uSel The selector.
8941 */
8942VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8943{
8944 /*
8945 * Get the selector table base and calculate the entry address.
8946 */
8947 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8948 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8949 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8950 GCPtr += uSel & X86_SEL_MASK;
8951
8952 /*
8953 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8954 * ugly stuff to avoid this. This will make sure it's an atomic access
8955 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8956 */
8957 VBOXSTRICTRC rcStrict;
8958 uint32_t volatile *pu32;
8959 if ((GCPtr & 3) == 0)
8960 {
8961 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8962 GCPtr += 2 + 2;
8963 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8964 if (rcStrict != VINF_SUCCESS)
8965 return rcStrict;
8966 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8967 }
8968 else
8969 {
8970 /* The misaligned GDT/LDT case, map the whole thing. */
8971 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8972 if (rcStrict != VINF_SUCCESS)
8973 return rcStrict;
8974 switch ((uintptr_t)pu32 & 3)
8975 {
8976 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8977 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8978 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8979 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8980 }
8981 }
8982
8983 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8984}
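/*
 * A worked sketch of the accessed-bit arithmetic above (illustrative only): in a
 * legacy descriptor the type field is byte 5, so X86_SEL_TYPE_ACCESSED (bit 0 of
 * the type) is absolute bit 40 of the 8-byte entry.  Mapping the aligned dword at
 * offset 4 turns that into bit 40 - 32 = 8, hence ASMAtomicBitSet(pu32, 8).  In
 * the misaligned case the byte offset added to pu32 is chosen to realign the
 * pointer while the bit index is reduced accordingly, e.g. for
 * ((uintptr_t)pu32 & 3) == 1 the code uses base pu32 + 3 and bit 40 - 24 = 16,
 * which lands on byte 3 + 2 = byte 5, bit 0 again.
 */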
8985
8986/** @} */
8987
8988/** @name Opcode Helpers.
8989 * @{
8990 */
8991
8992/**
8993 * Calculates the effective address of a ModR/M memory operand.
8994 *
8995 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8996 *
8997 * @return Strict VBox status code.
8998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8999 * @param bRm The ModRM byte.
9000 * @param cbImmAndRspOffset - First byte: The size of any immediate
9001 * following the effective address opcode bytes
9002 * (only for RIP relative addressing).
9003 * - Second byte: RSP displacement (for POP [ESP]).
9004 * @param pGCPtrEff Where to return the effective address.
9005 */
9006VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
9007{
9008 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9009# define SET_SS_DEF() \
9010 do \
9011 { \
9012 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9013 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9014 } while (0)
9015
9016 if (!IEM_IS_64BIT_CODE(pVCpu))
9017 {
9018/** @todo Check the effective address size crap! */
9019 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9020 {
9021 uint16_t u16EffAddr;
9022
9023 /* Handle the disp16 form with no registers first. */
9024 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9025 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9026 else
9027 {
9028 /* Get the displacement. */
9029 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9030 {
9031 case 0: u16EffAddr = 0; break;
9032 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9033 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9034 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9035 }
9036
9037 /* Add the base and index registers to the disp. */
9038 switch (bRm & X86_MODRM_RM_MASK)
9039 {
9040 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9041 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9042 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9043 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9044 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9045 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9046 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9047 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9048 }
9049 }
9050
9051 *pGCPtrEff = u16EffAddr;
9052 }
9053 else
9054 {
9055 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9056 uint32_t u32EffAddr;
9057
9058 /* Handle the disp32 form with no registers first. */
9059 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9060 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9061 else
9062 {
9063 /* Get the register (or SIB) value. */
9064 switch ((bRm & X86_MODRM_RM_MASK))
9065 {
9066 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9067 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9068 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9069 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9070 case 4: /* SIB */
9071 {
9072 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9073
9074 /* Get the index and scale it. */
9075 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9076 {
9077 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9078 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9079 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9080 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9081 case 4: u32EffAddr = 0; /*none */ break;
9082 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9083 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9084 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9086 }
9087 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9088
9089 /* add base */
9090 switch (bSib & X86_SIB_BASE_MASK)
9091 {
9092 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9093 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9094 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9095 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9096 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9097 case 5:
9098 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9099 {
9100 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9101 SET_SS_DEF();
9102 }
9103 else
9104 {
9105 uint32_t u32Disp;
9106 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9107 u32EffAddr += u32Disp;
9108 }
9109 break;
9110 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9111 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9112 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9113 }
9114 break;
9115 }
9116 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9117 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9118 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9119 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9120 }
9121
9122 /* Get and add the displacement. */
9123 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9124 {
9125 case 0:
9126 break;
9127 case 1:
9128 {
9129 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9130 u32EffAddr += i8Disp;
9131 break;
9132 }
9133 case 2:
9134 {
9135 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9136 u32EffAddr += u32Disp;
9137 break;
9138 }
9139 default:
9140 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9141 }
9142
9143 }
9144 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9145 *pGCPtrEff = u32EffAddr;
9146 else
9147 {
9148 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9149 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9150 }
9151 }
9152 }
9153 else
9154 {
9155 uint64_t u64EffAddr;
9156
9157 /* Handle the rip+disp32 form with no registers first. */
9158 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9159 {
9160 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9161 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9162 }
9163 else
9164 {
9165 /* Get the register (or SIB) value. */
9166 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9167 {
9168 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9169 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9170 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9171 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9172 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9173 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9174 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9175 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9176 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9177 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9178 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9179 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9180 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9181 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9182 /* SIB */
9183 case 4:
9184 case 12:
9185 {
9186 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9187
9188 /* Get the index and scale it. */
9189 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9190 {
9191 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9192 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9193 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9194 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9195 case 4: u64EffAddr = 0; /*none */ break;
9196 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9197 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9198 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9199 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9200 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9201 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9202 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9203 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9204 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9205 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9206 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9207 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9208 }
9209 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9210
9211 /* add base */
9212 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9213 {
9214 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9215 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9216 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9217 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9218 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9219 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9220 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9221 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9222 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9223 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9224 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9225 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9226 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9227 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9228 /* complicated encodings */
9229 case 5:
9230 case 13:
9231 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9232 {
9233 if (!pVCpu->iem.s.uRexB)
9234 {
9235 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9236 SET_SS_DEF();
9237 }
9238 else
9239 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9240 }
9241 else
9242 {
9243 uint32_t u32Disp;
9244 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9245 u64EffAddr += (int32_t)u32Disp;
9246 }
9247 break;
9248 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9249 }
9250 break;
9251 }
9252 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9253 }
9254
9255 /* Get and add the displacement. */
9256 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9257 {
9258 case 0:
9259 break;
9260 case 1:
9261 {
9262 int8_t i8Disp;
9263 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9264 u64EffAddr += i8Disp;
9265 break;
9266 }
9267 case 2:
9268 {
9269 uint32_t u32Disp;
9270 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9271 u64EffAddr += (int32_t)u32Disp;
9272 break;
9273 }
9274 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9275 }
9276
9277 }
9278
9279 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9280 *pGCPtrEff = u64EffAddr;
9281 else
9282 {
9283 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9284 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9285 }
9286 }
9287
9288 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9289 return VINF_SUCCESS;
9290}
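/*
 * Illustrative decodes for the function above (the encodings follow the x86 spec,
 * the register values are made up for the example):
 *
 *   16-bit mode, bRm = 0x46 -> mod=01, rm=110, i.e. [BP + disp8]: with BP=0x1234
 *   and disp8=0x10 the result is 0x1244, and since BP-based forms default to SS,
 *   SET_SS_DEF() switches iEffSeg to SS unless a segment prefix was supplied.
 *
 *   32-bit mode, bRm = 0x04, bSib = 0x88 -> [EAX + ECX*4]: with EAX=0x1000 and
 *   ECX=0x10 the result is 0x1040.
 *
 * The cbImmAndRspOffset packing is as documented: bits 7:0 give the size of any
 * immediate still following (needed for RIP-relative forms in 64-bit code) and
 * bits 15:8 give the RSP bias applied for POP [ESP] style operands.
 */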
9291
9292
9293#ifdef IEM_WITH_SETJMP
9294/**
9295 * Calculates the effective address of a ModR/M memory operand.
9296 *
9297 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9298 *
9299 * May longjmp on internal error.
9300 *
9301 * @return The effective address.
9302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9303 * @param bRm The ModRM byte.
9304 * @param cbImmAndRspOffset - First byte: The size of any immediate
9305 * following the effective address opcode bytes
9306 * (only for RIP relative addressing).
9307 * - Second byte: RSP displacement (for POP [ESP]).
9308 */
9309RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9310{
9311 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9312# define SET_SS_DEF() \
9313 do \
9314 { \
9315 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9316 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9317 } while (0)
9318
9319 if (!IEM_IS_64BIT_CODE(pVCpu))
9320 {
9321/** @todo Check the effective address size crap! */
9322 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9323 {
9324 uint16_t u16EffAddr;
9325
9326 /* Handle the disp16 form with no registers first. */
9327 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9328 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9329 else
9330 {
9331 /* Get the displacement. */
9332 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9333 {
9334 case 0: u16EffAddr = 0; break;
9335 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9336 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9337 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9338 }
9339
9340 /* Add the base and index registers to the disp. */
9341 switch (bRm & X86_MODRM_RM_MASK)
9342 {
9343 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9344 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9345 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9346 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9347 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9348 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9349 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9350 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9351 }
9352 }
9353
9354 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9355 return u16EffAddr;
9356 }
9357
9358 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9359 uint32_t u32EffAddr;
9360
9361 /* Handle the disp32 form with no registers first. */
9362 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9363 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9364 else
9365 {
9366 /* Get the register (or SIB) value. */
9367 switch ((bRm & X86_MODRM_RM_MASK))
9368 {
9369 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9370 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9371 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9372 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9373 case 4: /* SIB */
9374 {
9375 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9376
9377 /* Get the index and scale it. */
9378 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9379 {
9380 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9381 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9382 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9383 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9384 case 4: u32EffAddr = 0; /*none */ break;
9385 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9386 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9387 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9388 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9389 }
9390 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9391
9392 /* add base */
9393 switch (bSib & X86_SIB_BASE_MASK)
9394 {
9395 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9396 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9397 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9398 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9399 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9400 case 5:
9401 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9402 {
9403 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9404 SET_SS_DEF();
9405 }
9406 else
9407 {
9408 uint32_t u32Disp;
9409 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9410 u32EffAddr += u32Disp;
9411 }
9412 break;
9413 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9414 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9415 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9416 }
9417 break;
9418 }
9419 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9420 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9421 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9422 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9423 }
9424
9425 /* Get and add the displacement. */
9426 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9427 {
9428 case 0:
9429 break;
9430 case 1:
9431 {
9432 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9433 u32EffAddr += i8Disp;
9434 break;
9435 }
9436 case 2:
9437 {
9438 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9439 u32EffAddr += u32Disp;
9440 break;
9441 }
9442 default:
9443 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9444 }
9445 }
9446
9447 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9448 {
9449 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9450 return u32EffAddr;
9451 }
9452 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9453 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9454 return u32EffAddr & UINT16_MAX;
9455 }
9456
9457 uint64_t u64EffAddr;
9458
9459 /* Handle the rip+disp32 form with no registers first. */
9460 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9461 {
9462 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9463 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9464 }
9465 else
9466 {
9467 /* Get the register (or SIB) value. */
9468 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9469 {
9470 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9471 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9472 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9473 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9474 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9475 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9476 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9477 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9478 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9479 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9480 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9481 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9482 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9483 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9484 /* SIB */
9485 case 4:
9486 case 12:
9487 {
9488 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9489
9490 /* Get the index and scale it. */
9491 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9492 {
9493 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9494 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9495 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9496 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9497 case 4: u64EffAddr = 0; /*none */ break;
9498 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9499 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9500 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9501 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9502 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9503 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9504 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9505 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9506 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9507 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9508 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9509 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9510 }
9511 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9512
9513 /* add base */
9514 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9515 {
9516 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9517 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9518 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9519 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9520 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9521 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9522 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9523 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9524 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9525 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9526 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9527 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9528 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9529 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9530 /* complicated encodings */
9531 case 5:
9532 case 13:
9533 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9534 {
9535 if (!pVCpu->iem.s.uRexB)
9536 {
9537 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9538 SET_SS_DEF();
9539 }
9540 else
9541 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9542 }
9543 else
9544 {
9545 uint32_t u32Disp;
9546 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9547 u64EffAddr += (int32_t)u32Disp;
9548 }
9549 break;
9550 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9551 }
9552 break;
9553 }
9554 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9555 }
9556
9557 /* Get and add the displacement. */
9558 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9559 {
9560 case 0:
9561 break;
9562 case 1:
9563 {
9564 int8_t i8Disp;
9565 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9566 u64EffAddr += i8Disp;
9567 break;
9568 }
9569 case 2:
9570 {
9571 uint32_t u32Disp;
9572 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9573 u64EffAddr += (int32_t)u32Disp;
9574 break;
9575 }
9576 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9577 }
9578
9579 }
9580
9581 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9582 {
9583 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9584 return u64EffAddr;
9585 }
9586 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9587 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9588 return u64EffAddr & UINT32_MAX;
9589}
9590#endif /* IEM_WITH_SETJMP */
9591
9592
9593/**
9594 * Calculates the effective address of a ModR/M memory operand, extended version
9595 * for use in the recompilers.
9596 *
9597 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9598 *
9599 * @return Strict VBox status code.
9600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9601 * @param bRm The ModRM byte.
9602 * @param cbImmAndRspOffset - First byte: The size of any immediate
9603 * following the effective address opcode bytes
9604 * (only for RIP relative addressing).
9605 * - Second byte: RSP displacement (for POP [ESP]).
9606 * @param pGCPtrEff Where to return the effective address.
9607 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9608 * SIB byte (bits 39:32).
9609 */
9610VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9611{
9612 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9613# define SET_SS_DEF() \
9614 do \
9615 { \
9616 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9617 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9618 } while (0)
9619
9620 uint64_t uInfo;
9621 if (!IEM_IS_64BIT_CODE(pVCpu))
9622 {
9623/** @todo Check the effective address size crap! */
9624 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9625 {
9626 uint16_t u16EffAddr;
9627
9628 /* Handle the disp16 form with no registers first. */
9629 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9630 {
9631 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9632 uInfo = u16EffAddr;
9633 }
9634 else
9635 {
9636 /* Get the displacement. */
9637 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9638 {
9639 case 0: u16EffAddr = 0; break;
9640 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9641 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9642 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9643 }
9644 uInfo = u16EffAddr;
9645
9646 /* Add the base and index registers to the disp. */
9647 switch (bRm & X86_MODRM_RM_MASK)
9648 {
9649 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9650 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9651 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9652 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9653 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9654 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9655 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9656 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9657 }
9658 }
9659
9660 *pGCPtrEff = u16EffAddr;
9661 }
9662 else
9663 {
9664 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9665 uint32_t u32EffAddr;
9666
9667 /* Handle the disp32 form with no registers first. */
9668 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9669 {
9670 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9671 uInfo = u32EffAddr;
9672 }
9673 else
9674 {
9675 /* Get the register (or SIB) value. */
9676 uInfo = 0;
9677 switch ((bRm & X86_MODRM_RM_MASK))
9678 {
9679 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9680 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9681 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9682 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9683 case 4: /* SIB */
9684 {
9685 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9686 uInfo = (uint64_t)bSib << 32;
9687
9688 /* Get the index and scale it. */
9689 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9690 {
9691 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9692 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9693 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9694 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9695 case 4: u32EffAddr = 0; /*none */ break;
9696 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9697 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9698 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9699 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9700 }
9701 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9702
9703 /* add base */
9704 switch (bSib & X86_SIB_BASE_MASK)
9705 {
9706 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9707 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9708 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9709 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9710 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9711 case 5:
9712 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9713 {
9714 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9715 SET_SS_DEF();
9716 }
9717 else
9718 {
9719 uint32_t u32Disp;
9720 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9721 u32EffAddr += u32Disp;
9722 uInfo |= u32Disp;
9723 }
9724 break;
9725 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9726 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9727 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9728 }
9729 break;
9730 }
9731 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9732 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9733 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9734 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9735 }
9736
9737 /* Get and add the displacement. */
9738 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9739 {
9740 case 0:
9741 break;
9742 case 1:
9743 {
9744 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9745 u32EffAddr += i8Disp;
9746 uInfo |= (uint32_t)(int32_t)i8Disp;
9747 break;
9748 }
9749 case 2:
9750 {
9751 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9752 u32EffAddr += u32Disp;
9753 uInfo |= (uint32_t)u32Disp;
9754 break;
9755 }
9756 default:
9757 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9758 }
9759
9760 }
9761 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9762 *pGCPtrEff = u32EffAddr;
9763 else
9764 {
9765 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9766 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9767 }
9768 }
9769 }
9770 else
9771 {
9772 uint64_t u64EffAddr;
9773
9774 /* Handle the rip+disp32 form with no registers first. */
9775 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9776 {
9777 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9778 uInfo = (uint32_t)u64EffAddr;
9779 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9780 }
9781 else
9782 {
9783 /* Get the register (or SIB) value. */
9784 uInfo = 0;
9785 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9786 {
9787 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9788 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9789 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9790 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9791 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9792 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9793 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9794 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9795 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9796 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9797 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9798 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9799 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9800 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9801 /* SIB */
9802 case 4:
9803 case 12:
9804 {
9805 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9806 uInfo = (uint64_t)bSib << 32;
9807
9808 /* Get the index and scale it. */
9809 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9810 {
9811 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9812 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9813 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9814 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9815 case 4: u64EffAddr = 0; /*none */ break;
9816 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9817 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9818 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9819 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9820 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9821 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9822 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9823 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9824 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9825 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9826 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9827 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9828 }
9829 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9830
9831 /* add base */
9832 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9833 {
9834 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9835 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9836 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9837 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9838 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9839 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9840 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9841 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9842 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9843 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9844 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9845 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9846 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9847 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9848 /* complicated encodings */
9849 case 5:
9850 case 13:
9851 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9852 {
9853 if (!pVCpu->iem.s.uRexB)
9854 {
9855 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9856 SET_SS_DEF();
9857 }
9858 else
9859 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9860 }
9861 else
9862 {
9863 uint32_t u32Disp;
9864 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9865 u64EffAddr += (int32_t)u32Disp;
9866 uInfo |= u32Disp;
9867 }
9868 break;
9869 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9870 }
9871 break;
9872 }
9873 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9874 }
9875
9876 /* Get and add the displacement. */
9877 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9878 {
9879 case 0:
9880 break;
9881 case 1:
9882 {
9883 int8_t i8Disp;
9884 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9885 u64EffAddr += i8Disp;
9886 uInfo |= (uint32_t)(int32_t)i8Disp;
9887 break;
9888 }
9889 case 2:
9890 {
9891 uint32_t u32Disp;
9892 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9893 u64EffAddr += (int32_t)u32Disp;
9894 uInfo |= u32Disp;
9895 break;
9896 }
9897 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9898 }
9899
9900 }
9901
9902 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9903 *pGCPtrEff = u64EffAddr;
9904 else
9905 {
9906 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9907 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9908 }
9909 }
9910 *puInfo = uInfo;
9911
9912 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9913 return VINF_SUCCESS;
9914}
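/*
 * Illustrative example of the uInfo packing produced above (the values are made
 * up): for a 64-bit [rax + rcx*4 + 0x12345678] operand, i.e. bSib = 0x88 with a
 * dword displacement of 0x12345678, the function returns
 *
 *      uInfo = ((uint64_t)0x88 << 32) | UINT32_C(0x12345678);
 *
 * so a recompiler can recover the displacement as (uint32_t)uInfo and the SIB
 * byte as (uint8_t)(uInfo >> 32) without re-decoding the instruction stream.
 */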
9915
9916/** @} */
9917
9918
9919#ifdef LOG_ENABLED
9920/**
9921 * Logs the current instruction.
9922 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9923 * @param fSameCtx Set if we have the same context information as the VMM,
9924 * clear if we may have already executed an instruction in
9925 * our debug context. When clear, we assume IEMCPU holds
9926 * valid CPU mode info.
9927 *
9928 * The @a fSameCtx parameter is now misleading and obsolete.
9929 * @param pszFunction The IEM function doing the execution.
9930 */
9931static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9932{
9933# ifdef IN_RING3
9934 if (LogIs2Enabled())
9935 {
9936 char szInstr[256];
9937 uint32_t cbInstr = 0;
9938 if (fSameCtx)
9939 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9940 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9941 szInstr, sizeof(szInstr), &cbInstr);
9942 else
9943 {
9944 uint32_t fFlags = 0;
9945 switch (IEM_GET_CPU_MODE(pVCpu))
9946 {
9947 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9948 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9949 case IEMMODE_16BIT:
9950 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9951 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9952 else
9953 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9954 break;
9955 }
9956 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9957 szInstr, sizeof(szInstr), &cbInstr);
9958 }
9959
9960 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9961 Log2(("**** %s fExec=%x\n"
9962 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9963 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9964 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9965 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9966 " %s\n"
9967 , pszFunction, pVCpu->iem.s.fExec,
9968 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9969 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9970 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9971 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9972 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9973 szInstr));
9974
9975 if (LogIs3Enabled())
9976 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9977 }
9978 else
9979# endif
9980 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9981 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9982 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9983}
9984#endif /* LOG_ENABLED */
9985
9986
9987#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9988/**
9989 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9990 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9991 *
9992 * @returns Modified rcStrict.
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param rcStrict The instruction execution status.
9995 */
9996static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9997{
9998 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9999 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
10000 {
10001 /* VMX preemption timer takes priority over NMI-window exits. */
10002 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
10003 {
10004 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
10005 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
10006 }
10007 /*
10008 * Check remaining intercepts.
10009 *
10010 * NMI-window and Interrupt-window VM-exits.
10011 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
10012 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
10013 *
10014 * See Intel spec. 26.7.6 "NMI-Window Exiting".
10015 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
10016 */
10017 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
10018 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10019 && !TRPMHasTrap(pVCpu))
10020 {
10021 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
10022 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
10023 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
10024 {
10025 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
10026 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
10027 }
10028 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
10029 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
10030 {
10031 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
10032 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
10033 }
10034 }
10035 }
10036 /* TPR-below threshold/APIC write has the highest priority. */
10037 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
10038 {
10039 rcStrict = iemVmxApicWriteEmulation(pVCpu);
10040 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10041 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
10042 }
10043 /* MTF takes priority over VMX-preemption timer. */
10044 else
10045 {
10046 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
10047 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10048 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
10049 }
10050 return rcStrict;
10051}
10052#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
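/*
 * For reference, the priority order implemented above is: VMCPU_FF_VMX_APIC_WRITE
 * (TPR-below-threshold / APIC-write emulation) first, then VMCPU_FF_VMX_MTF, then
 * VMCPU_FF_VMX_PREEMPT_TIMER, and finally the NMI-window and interrupt-window
 * VM-exits, which are additionally gated on the interrupt shadow, pending TRPM
 * event and virtual-NMI / virtual-interrupt state checks seen in the code.
 */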
10053
10054
10055/**
10056 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10057 * IEMExecOneWithPrefetchedByPC.
10058 *
10059 * Similar code is found in IEMExecLots.
10060 *
10061 * @return Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10063 * @param fExecuteInhibit If set, execute the instruction following STI,
10064 * POP SS and MOV SS,GR.
10065 * @param pszFunction The calling function name.
10066 */
10067DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
10068{
10069 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10070 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10071 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10072 RT_NOREF_PV(pszFunction);
10073
10074#ifdef IEM_WITH_SETJMP
10075 VBOXSTRICTRC rcStrict;
10076 IEM_TRY_SETJMP(pVCpu, rcStrict)
10077 {
10078 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10079 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10080 }
10081 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10082 {
10083 pVCpu->iem.s.cLongJumps++;
10084 }
10085 IEM_CATCH_LONGJMP_END(pVCpu);
10086#else
10087 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10088 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10089#endif
10090 if (rcStrict == VINF_SUCCESS)
10091 pVCpu->iem.s.cInstructions++;
10092 if (pVCpu->iem.s.cActiveMappings > 0)
10093 {
10094 Assert(rcStrict != VINF_SUCCESS);
10095 iemMemRollback(pVCpu);
10096 }
10097 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10098 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10099 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10100
10101//#ifdef DEBUG
10102// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
10103//#endif
10104
10105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10106 /*
10107 * Perform any VMX nested-guest instruction boundary actions.
10108 *
10109 * If any of these causes a VM-exit, we must skip executing the next
10110 * instruction (would run into stale page tables). A VM-exit makes sure
10111 * there is no interrupt-inhibition, so that should ensure we don't go
10112 * on to try executing the next instruction. Clearing fExecuteInhibit is
10113 * problematic because of the setjmp/longjmp clobbering above.
10114 */
10115 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10116 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
10117 || rcStrict != VINF_SUCCESS)
10118 { /* likely */ }
10119 else
10120 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10121#endif
10122
10123 /* Execute the next instruction as well if an sti, pop ss or
10124 mov ss, Gr has just completed successfully. */
10125 if ( fExecuteInhibit
10126 && rcStrict == VINF_SUCCESS
10127 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10128 {
10129 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10130 if (rcStrict == VINF_SUCCESS)
10131 {
10132#ifdef LOG_ENABLED
10133 iemLogCurInstr(pVCpu, false, pszFunction);
10134#endif
10135#ifdef IEM_WITH_SETJMP
10136 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10137 {
10138 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10139 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10140 }
10141 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10142 {
10143 pVCpu->iem.s.cLongJumps++;
10144 }
10145 IEM_CATCH_LONGJMP_END(pVCpu);
10146#else
10147 IEM_OPCODE_GET_FIRST_U8(&b);
10148 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10149#endif
10150 if (rcStrict == VINF_SUCCESS)
10151 {
10152 pVCpu->iem.s.cInstructions++;
10153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10154 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10155 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10156 { /* likely */ }
10157 else
10158 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10159#endif
10160 }
10161 if (pVCpu->iem.s.cActiveMappings > 0)
10162 {
10163 Assert(rcStrict != VINF_SUCCESS);
10164 iemMemRollback(pVCpu);
10165 }
10166 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10167 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10168 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10169 }
10170 else if (pVCpu->iem.s.cActiveMappings > 0)
10171 iemMemRollback(pVCpu);
10172 /** @todo drop this after we bake this change into RIP advancing. */
10173 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10174 }
10175
10176 /*
10177 * Return value fiddling, statistics and sanity assertions.
10178 */
10179 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10180
10181 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10182 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10183 return rcStrict;
10184}
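/*
 * Illustrative guest sequence for the fExecuteInhibit handling above (a generic
 * example, not taken from any particular guest): a real-mode stack switch like
 *
 *      mov ss, ax
 *      mov sp, 0x7c00
 *
 * must not have an event delivered between the two instructions.  After the
 * first one CPUMIsInInterruptShadow() is true, so the second is interpreted in
 * the same call before any interrupt injection can take place.
 */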
10185
10186
10187/**
10188 * Execute one instruction.
10189 *
10190 * @return Strict VBox status code.
10191 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10192 */
10193VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10194{
10195 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10196#ifdef LOG_ENABLED
10197 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10198#endif
10199
10200 /*
10201 * Do the decoding and emulation.
10202 */
10203 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10204 if (rcStrict == VINF_SUCCESS)
10205 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10206 else if (pVCpu->iem.s.cActiveMappings > 0)
10207 iemMemRollback(pVCpu);
10208
10209 if (rcStrict != VINF_SUCCESS)
10210 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10211 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10212 return rcStrict;
10213}
10214
10215
10216VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10217{
10218 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10219 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10220 if (rcStrict == VINF_SUCCESS)
10221 {
10222 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10223 if (pcbWritten)
10224 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10225 }
10226 else if (pVCpu->iem.s.cActiveMappings > 0)
10227 iemMemRollback(pVCpu);
10228
10229 return rcStrict;
10230}
10231
10232
10233VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10234 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10235{
10236 VBOXSTRICTRC rcStrict;
10237 if ( cbOpcodeBytes
10238 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10239 {
10240 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10241#ifdef IEM_WITH_CODE_TLB
10242 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10243 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10244 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10245 pVCpu->iem.s.offCurInstrStart = 0;
10246 pVCpu->iem.s.offInstrNextByte = 0;
10247 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10248#else
10249 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10250 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10251#endif
10252 rcStrict = VINF_SUCCESS;
10253 }
10254 else
10255 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10256 if (rcStrict == VINF_SUCCESS)
10257 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10258 else if (pVCpu->iem.s.cActiveMappings > 0)
10259 iemMemRollback(pVCpu);
10260
10261 return rcStrict;
10262}
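/*
 * Minimal usage sketch for IEMExecOneWithPrefetchedByPC (the caller is
 * hypothetical, only the signature is taken from above): a component that has
 * already read the opcode bytes at the current RIP can pass them in and skip
 * the prefetch:
 *
 *      uint8_t const abBytes[] = { 0x0f, 0xa2 }; // cpuid
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           abBytes, sizeof(abBytes));
 *
 * If the supplied PC does not match the guest RIP, the code above simply falls
 * back to the normal decode-and-prefetch path.
 */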
10263
10264
10265VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10266{
10267 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10268 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10269 if (rcStrict == VINF_SUCCESS)
10270 {
10271 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10272 if (pcbWritten)
10273 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10274 }
10275 else if (pVCpu->iem.s.cActiveMappings > 0)
10276 iemMemRollback(pVCpu);
10277
10278 return rcStrict;
10279}
10280
10281
10282VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10283 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10284{
10285 VBOXSTRICTRC rcStrict;
10286 if ( cbOpcodeBytes
10287 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10288 {
10289 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10290#ifdef IEM_WITH_CODE_TLB
10291 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10292 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10293 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10294 pVCpu->iem.s.offCurInstrStart = 0;
10295 pVCpu->iem.s.offInstrNextByte = 0;
10296 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10297#else
10298 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10299 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10300#endif
10301 rcStrict = VINF_SUCCESS;
10302 }
10303 else
10304 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10305 if (rcStrict == VINF_SUCCESS)
10306 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10307 else if (pVCpu->iem.s.cActiveMappings > 0)
10308 iemMemRollback(pVCpu);
10309
10310 return rcStrict;
10311}
10312
10313
10314/**
10315 * For handling split cacheline lock operations when the host has split-lock
10316 * detection enabled.
10317 *
10318 * This will cause the interpreter to disregard the lock prefix and implicit
10319 * locking (xchg).
10320 *
10321 * @returns Strict VBox status code.
10322 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10323 */
10324VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10325{
10326 /*
10327 * Do the decoding and emulation.
10328 */
10329 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10330 if (rcStrict == VINF_SUCCESS)
10331 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10332 else if (pVCpu->iem.s.cActiveMappings > 0)
10333 iemMemRollback(pVCpu);
10334
10335 if (rcStrict != VINF_SUCCESS)
10336 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10337 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10338 return rcStrict;
10339}
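/*
 * Example of the situation this entry point targets (illustrative address,
 * assuming 64-byte cache lines): a guest executing
 *
 *      lock cmpxchg dword [0x103e], ecx
 *
 * has its 4-byte operand span 0x103e..0x1041 and thus cross the line boundary at
 * 0x1040.  With host split-lock detection enabled such an access is trapped
 * rather than silently taking the bus lock, and the caller can re-run the
 * instruction through this function, where IEM_F_X86_DISREGARD_LOCK makes the
 * interpreter perform the access without the implicit locking.
 */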
10340
10341
10342/**
10343 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10344 * inject a pending TRPM trap.
10345 */
10346VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10347{
10348 Assert(TRPMHasTrap(pVCpu));
10349
10350 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10351 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10352 {
10353 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10354#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10355 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10356 if (fIntrEnabled)
10357 {
10358 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10359 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10360 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10361 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10362 else
10363 {
10364 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10365 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10366 }
10367 }
10368#else
10369 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10370#endif
10371 if (fIntrEnabled)
10372 {
10373 uint8_t u8TrapNo;
10374 TRPMEVENT enmType;
10375 uint32_t uErrCode;
10376 RTGCPTR uCr2;
10377 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10378 AssertRC(rc2);
10379 Assert(enmType == TRPM_HARDWARE_INT);
10380 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10381
10382 TRPMResetTrap(pVCpu);
10383
10384#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10385 /* Injecting an event may cause a VM-exit. */
10386 if ( rcStrict != VINF_SUCCESS
10387 && rcStrict != VINF_IEM_RAISED_XCPT)
10388 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10389#else
10390 NOREF(rcStrict);
10391#endif
10392 }
10393 }
10394
10395 return VINF_SUCCESS;
10396}
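/*
 * For reference, the injection conditions implemented above: no interrupt shadow,
 * no NMI-induced interrupt inhibition, and interrupts enabled for the current
 * mode - plain EFLAGS.IF for a normal guest, the VMX physical-interrupt controls
 * for a nested VMX guest, and GIF plus the SVM physical-interrupt controls for a
 * nested SVM guest.  A trap that cannot be injected yet is left in TRPM for the
 * caller to retry at a later instruction boundary.
 */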
10397
10398
10399VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10400{
10401 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10402 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10403 Assert(cMaxInstructions > 0);
10404
10405 /*
10406 * See if there is an interrupt pending in TRPM, inject it if we can.
10407 */
10408 /** @todo What if we are injecting an exception and not an interrupt? Is that
10409 * possible here? For now we assert it is indeed only an interrupt. */
10410 if (!TRPMHasTrap(pVCpu))
10411 { /* likely */ }
10412 else
10413 {
10414 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10415 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10416 { /*likely */ }
10417 else
10418 return rcStrict;
10419 }
10420
10421 /*
10422 * Initial decoder init w/ prefetch, then setup setjmp.
10423 */
10424 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10425 if (rcStrict == VINF_SUCCESS)
10426 {
10427#ifdef IEM_WITH_SETJMP
10428 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10429 IEM_TRY_SETJMP(pVCpu, rcStrict)
10430#endif
10431 {
10432 /*
10433                  * The run loop.  We limit ourselves to the caller specified cMaxInstructions.
10434 */
10435 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10436 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10437 for (;;)
10438 {
10439 /*
10440 * Log the state.
10441 */
10442#ifdef LOG_ENABLED
10443 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10444#endif
10445
10446 /*
10447 * Do the decoding and emulation.
10448 */
10449 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10450 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10451#ifdef VBOX_STRICT
10452 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10453#endif
10454 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10455 {
10456 Assert(pVCpu->iem.s.cActiveMappings == 0);
10457 pVCpu->iem.s.cInstructions++;
10458
10459#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10460 /* Perform any VMX nested-guest instruction boundary actions. */
10461 uint64_t fCpu = pVCpu->fLocalForcedActions;
10462 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10463 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10464 { /* likely */ }
10465 else
10466 {
10467 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10469 fCpu = pVCpu->fLocalForcedActions;
10470 else
10471 {
10472 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10473 break;
10474 }
10475 }
10476#endif
10477 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10478 {
10479#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10480 uint64_t fCpu = pVCpu->fLocalForcedActions;
10481#endif
10482 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10483 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10484 | VMCPU_FF_TLB_FLUSH
10485 | VMCPU_FF_UNHALT );
10486
10487 if (RT_LIKELY( ( !fCpu
10488 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10489 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10490 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10491 {
10492 if (--cMaxInstructionsGccStupidity > 0)
10493 {
10494                     /* Poll timers every now and then according to the caller's specs. */
10495 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10496 || !TMTimerPollBool(pVM, pVCpu))
10497 {
10498 Assert(pVCpu->iem.s.cActiveMappings == 0);
10499 iemReInitDecoder(pVCpu);
10500 continue;
10501 }
10502 }
10503 }
10504 }
10505 Assert(pVCpu->iem.s.cActiveMappings == 0);
10506 }
10507 else if (pVCpu->iem.s.cActiveMappings > 0)
10508 iemMemRollback(pVCpu);
10509 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10510 break;
10511 }
10512 }
10513#ifdef IEM_WITH_SETJMP
10514 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10515 {
10516 if (pVCpu->iem.s.cActiveMappings > 0)
10517 iemMemRollback(pVCpu);
10518# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10519 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10520# endif
10521 pVCpu->iem.s.cLongJumps++;
10522 }
10523 IEM_CATCH_LONGJMP_END(pVCpu);
10524#endif
10525
10526 /*
10527 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10528 */
10529 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10530 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10531 }
10532 else
10533 {
10534 if (pVCpu->iem.s.cActiveMappings > 0)
10535 iemMemRollback(pVCpu);
10536
10537#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10538 /*
10539 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10540 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10541 */
10542 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10543#endif
10544 }
10545
10546 /*
10547 * Maybe re-enter raw-mode and log.
10548 */
10549 if (rcStrict != VINF_SUCCESS)
10550 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10551 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10552 if (pcInstructions)
10553 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10554 return rcStrict;
10555}
10556
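/*
 * Usage sketch (illustrative only, not part of the original source): note
 * that cPollRate must be a power of two minus one, as asserted above; the
 * 4096 and 511 values below are examples, not recommendations.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 *      LogFlow(("IEMExecLots ran %u instructions -> %Rrc\n",
 *               cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */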
10557
10558/**
10559 * Interface used by EMExecuteExec, does exit statistics and limits.
10560 *
10561 * @returns Strict VBox status code.
10562 * @param pVCpu The cross context virtual CPU structure.
10563 * @param fWillExit To be defined.
10564 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10565 * @param cMaxInstructions Maximum number of instructions to execute.
10566 * @param cMaxInstructionsWithoutExits
10567 * The max number of instructions without exits.
10568 * @param pStats Where to return statistics.
10569 */
10570VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10571 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10572{
10573 NOREF(fWillExit); /** @todo define flexible exit crits */
10574
10575 /*
10576 * Initialize return stats.
10577 */
10578 pStats->cInstructions = 0;
10579 pStats->cExits = 0;
10580 pStats->cMaxExitDistance = 0;
10581 pStats->cReserved = 0;
10582
10583 /*
10584 * Initial decoder init w/ prefetch, then setup setjmp.
10585 */
10586 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10587 if (rcStrict == VINF_SUCCESS)
10588 {
10589#ifdef IEM_WITH_SETJMP
10590 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10591 IEM_TRY_SETJMP(pVCpu, rcStrict)
10592#endif
10593 {
10594#ifdef IN_RING0
10595 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10596#endif
10597 uint32_t cInstructionSinceLastExit = 0;
10598
10599 /*
10600              * The run loop.  We limit ourselves to the caller specified cMaxInstructions.
10601 */
10602 PVM pVM = pVCpu->CTX_SUFF(pVM);
10603 for (;;)
10604 {
10605 /*
10606 * Log the state.
10607 */
10608#ifdef LOG_ENABLED
10609 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10610#endif
10611
10612 /*
10613 * Do the decoding and emulation.
10614 */
10615 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10616
10617 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10618 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10619
10620 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10621 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10622 {
10623 pStats->cExits += 1;
10624 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10625 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10626 cInstructionSinceLastExit = 0;
10627 }
10628
10629 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10630 {
10631 Assert(pVCpu->iem.s.cActiveMappings == 0);
10632 pVCpu->iem.s.cInstructions++;
10633 pStats->cInstructions++;
10634 cInstructionSinceLastExit++;
10635
10636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10637 /* Perform any VMX nested-guest instruction boundary actions. */
10638 uint64_t fCpu = pVCpu->fLocalForcedActions;
10639 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10640 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10641 { /* likely */ }
10642 else
10643 {
10644 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10645 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10646 fCpu = pVCpu->fLocalForcedActions;
10647 else
10648 {
10649 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10650 break;
10651 }
10652 }
10653#endif
10654 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10655 {
10656#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10657 uint64_t fCpu = pVCpu->fLocalForcedActions;
10658#endif
10659 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10660 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10661 | VMCPU_FF_TLB_FLUSH
10662 | VMCPU_FF_UNHALT );
10663 if (RT_LIKELY( ( ( !fCpu
10664 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10665 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10666 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10667 || pStats->cInstructions < cMinInstructions))
10668 {
10669 if (pStats->cInstructions < cMaxInstructions)
10670 {
10671 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10672 {
10673#ifdef IN_RING0
10674 if ( !fCheckPreemptionPending
10675 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10676#endif
10677 {
10678 Assert(pVCpu->iem.s.cActiveMappings == 0);
10679 iemReInitDecoder(pVCpu);
10680 continue;
10681 }
10682#ifdef IN_RING0
10683 rcStrict = VINF_EM_RAW_INTERRUPT;
10684 break;
10685#endif
10686 }
10687 }
10688 }
10689 Assert(!(fCpu & VMCPU_FF_IEM));
10690 }
10691 Assert(pVCpu->iem.s.cActiveMappings == 0);
10692 }
10693 else if (pVCpu->iem.s.cActiveMappings > 0)
10694 iemMemRollback(pVCpu);
10695 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10696 break;
10697 }
10698 }
10699#ifdef IEM_WITH_SETJMP
10700 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10701 {
10702 if (pVCpu->iem.s.cActiveMappings > 0)
10703 iemMemRollback(pVCpu);
10704 pVCpu->iem.s.cLongJumps++;
10705 }
10706 IEM_CATCH_LONGJMP_END(pVCpu);
10707#endif
10708
10709 /*
10710 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10711 */
10712 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10713 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10714 }
10715 else
10716 {
10717 if (pVCpu->iem.s.cActiveMappings > 0)
10718 iemMemRollback(pVCpu);
10719
10720#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10721 /*
10722 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10723 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10724 */
10725 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10726#endif
10727 }
10728
10729 /*
10730 * Maybe re-enter raw-mode and log.
10731 */
10732 if (rcStrict != VINF_SUCCESS)
10733 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10734 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10735 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10736 return rcStrict;
10737}
10738
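/*
 * Usage sketch (illustrative only, not part of the original source): the
 * statistics structure is fully initialized by the function, so the caller
 * only provides storage; the limit values below are example numbers.
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 512, &Stats);
 *      LogFlow(("%u instructions, %u exits, max exit distance %u -> %Rrc\n",
 *               Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance,
 *               VBOXSTRICTRC_VAL(rcStrict)));
 */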
10739
10740/**
10741 * Injects a trap, fault, abort, software interrupt or external interrupt.
10742 *
10743 * The parameter list matches TRPMQueryTrapAll pretty closely.
10744 *
10745 * @returns Strict VBox status code.
10746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10747 * @param u8TrapNo The trap number.
10748 * @param enmType What type is it (trap/fault/abort), software
10749 * interrupt or hardware interrupt.
10750 * @param uErrCode The error code if applicable.
10751 * @param uCr2 The CR2 value if applicable.
10752 * @param cbInstr The instruction length (only relevant for
10753 * software interrupts).
10754 */
10755VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10756 uint8_t cbInstr)
10757{
10758 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10759#ifdef DBGFTRACE_ENABLED
10760 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10761 u8TrapNo, enmType, uErrCode, uCr2);
10762#endif
10763
10764 uint32_t fFlags;
10765 switch (enmType)
10766 {
10767 case TRPM_HARDWARE_INT:
10768 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10769 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10770 uErrCode = uCr2 = 0;
10771 break;
10772
10773 case TRPM_SOFTWARE_INT:
10774 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10775 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10776 uErrCode = uCr2 = 0;
10777 break;
10778
10779 case TRPM_TRAP:
10780 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10781 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10782 if (u8TrapNo == X86_XCPT_PF)
10783 fFlags |= IEM_XCPT_FLAGS_CR2;
10784 switch (u8TrapNo)
10785 {
10786 case X86_XCPT_DF:
10787 case X86_XCPT_TS:
10788 case X86_XCPT_NP:
10789 case X86_XCPT_SS:
10790 case X86_XCPT_PF:
10791 case X86_XCPT_AC:
10792 case X86_XCPT_GP:
10793 fFlags |= IEM_XCPT_FLAGS_ERR;
10794 break;
10795 }
10796 break;
10797
10798 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10799 }
10800
10801 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10802
10803 if (pVCpu->iem.s.cActiveMappings > 0)
10804 iemMemRollback(pVCpu);
10805
10806 return rcStrict;
10807}
10808
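/*
 * Usage sketch (illustrative only, not part of the original source):
 * injecting a page fault with an error code and fault address, and an
 * external (hardware) interrupt; uErrCode, uCr2 and u8Vector are
 * placeholders supplied by the caller.
 *
 *      rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0);
 *      rcStrict = IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0, 0, 0);
 */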
10809
10810/**
10811 * Injects the active TRPM event.
10812 *
10813 * @returns Strict VBox status code.
10814 * @param pVCpu The cross context virtual CPU structure.
10815 */
10816VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10817{
10818#ifndef IEM_IMPLEMENTS_TASKSWITCH
10819 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10820#else
10821 uint8_t u8TrapNo;
10822 TRPMEVENT enmType;
10823 uint32_t uErrCode;
10824 RTGCUINTPTR uCr2;
10825 uint8_t cbInstr;
10826 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10827 if (RT_FAILURE(rc))
10828 return rc;
10829
10830 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10831 * ICEBP \#DB injection as a special case. */
10832 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10833#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10834 if (rcStrict == VINF_SVM_VMEXIT)
10835 rcStrict = VINF_SUCCESS;
10836#endif
10837#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10838 if (rcStrict == VINF_VMX_VMEXIT)
10839 rcStrict = VINF_SUCCESS;
10840#endif
10841 /** @todo Are there any other codes that imply the event was successfully
10842 * delivered to the guest? See @bugref{6607}. */
10843 if ( rcStrict == VINF_SUCCESS
10844 || rcStrict == VINF_IEM_RAISED_XCPT)
10845 TRPMResetTrap(pVCpu);
10846
10847 return rcStrict;
10848#endif
10849}
10850
10851
10852VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10853{
10854 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10855 return VERR_NOT_IMPLEMENTED;
10856}
10857
10858
10859VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10860{
10861 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10862 return VERR_NOT_IMPLEMENTED;
10863}
10864
10865
10866/**
10867 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10868 *
10869 * This API ASSUMES that the caller has already verified that the guest code is
10870 * allowed to access the I/O port. (The I/O port is in the DX register in the
10871 * guest state.)
10872 *
10873 * @returns Strict VBox status code.
10874 * @param pVCpu The cross context virtual CPU structure.
10875 * @param cbValue The size of the I/O port access (1, 2, or 4).
10876 * @param enmAddrMode The addressing mode.
10877 * @param fRepPrefix Indicates whether a repeat prefix is used
10878 * (doesn't matter which for this instruction).
10879 * @param cbInstr The instruction length in bytes.
10880 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
10881 * @param fIoChecked Whether the access to the I/O port has been
10882 * checked or not. It's typically checked in the
10883 * HM scenario.
10884 */
10885VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10886 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10887{
10888 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10889 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10890
10891 /*
10892 * State init.
10893 */
10894 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10895
10896 /*
10897 * Switch orgy for getting to the right handler.
10898 */
10899 VBOXSTRICTRC rcStrict;
10900 if (fRepPrefix)
10901 {
10902 switch (enmAddrMode)
10903 {
10904 case IEMMODE_16BIT:
10905 switch (cbValue)
10906 {
10907 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10908 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10909 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10910 default:
10911 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10912 }
10913 break;
10914
10915 case IEMMODE_32BIT:
10916 switch (cbValue)
10917 {
10918 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10919 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10920 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10921 default:
10922 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10923 }
10924 break;
10925
10926 case IEMMODE_64BIT:
10927 switch (cbValue)
10928 {
10929 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10930 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10931 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10932 default:
10933 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10934 }
10935 break;
10936
10937 default:
10938 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10939 }
10940 }
10941 else
10942 {
10943 switch (enmAddrMode)
10944 {
10945 case IEMMODE_16BIT:
10946 switch (cbValue)
10947 {
10948 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10949 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10950 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10951 default:
10952 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10953 }
10954 break;
10955
10956 case IEMMODE_32BIT:
10957 switch (cbValue)
10958 {
10959 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10960 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10961 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10962 default:
10963 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10964 }
10965 break;
10966
10967 case IEMMODE_64BIT:
10968 switch (cbValue)
10969 {
10970 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10971 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10972 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10973 default:
10974 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10975 }
10976 break;
10977
10978 default:
10979 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10980 }
10981 }
10982
10983 if (pVCpu->iem.s.cActiveMappings)
10984 iemMemRollback(pVCpu);
10985
10986 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10987}
10988
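/*
 * Usage sketch (illustrative only, not part of the original source):
 * forwarding a "REP OUTSB" exit with a 32-bit address size and the default
 * DS segment; cbInstr comes from the exit information and fIoChecked is set
 * to true here purely as an example of the HM scenario.
 *
 *      rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, cbInstr,
 *                                      X86_SREG_DS, true);
 */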
10989
10990/**
10991 * Interface for HM and EM for executing string I/O IN (read) instructions.
10992 *
10993 * This API ASSUMES that the caller has already verified that the guest code is
10994 * allowed to access the I/O port. (The I/O port is in the DX register in the
10995 * guest state.)
10996 *
10997 * @returns Strict VBox status code.
10998 * @param pVCpu The cross context virtual CPU structure.
10999 * @param cbValue The size of the I/O port access (1, 2, or 4).
11000 * @param enmAddrMode The addressing mode.
11001 * @param fRepPrefix Indicates whether a repeat prefix is used
11002 * (doesn't matter which for this instruction).
11003 * @param cbInstr The instruction length in bytes.
11004 * @param fIoChecked Whether the access to the I/O port has been
11005 * checked or not. It's typically checked in the
11006 * HM scenario.
11007 */
11008VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11009 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11010{
11011 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11012
11013 /*
11014 * State init.
11015 */
11016 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11017
11018 /*
11019 * Switch orgy for getting to the right handler.
11020 */
11021 VBOXSTRICTRC rcStrict;
11022 if (fRepPrefix)
11023 {
11024 switch (enmAddrMode)
11025 {
11026 case IEMMODE_16BIT:
11027 switch (cbValue)
11028 {
11029 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11030 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11031 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11032 default:
11033 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11034 }
11035 break;
11036
11037 case IEMMODE_32BIT:
11038 switch (cbValue)
11039 {
11040 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11041 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11042 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11043 default:
11044 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11045 }
11046 break;
11047
11048 case IEMMODE_64BIT:
11049 switch (cbValue)
11050 {
11051 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11052 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11053 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11054 default:
11055 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11056 }
11057 break;
11058
11059 default:
11060 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11061 }
11062 }
11063 else
11064 {
11065 switch (enmAddrMode)
11066 {
11067 case IEMMODE_16BIT:
11068 switch (cbValue)
11069 {
11070 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11071 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11072 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11073 default:
11074 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11075 }
11076 break;
11077
11078 case IEMMODE_32BIT:
11079 switch (cbValue)
11080 {
11081 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11082 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11083 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11084 default:
11085 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11086 }
11087 break;
11088
11089 case IEMMODE_64BIT:
11090 switch (cbValue)
11091 {
11092 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11093 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11094 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11095 default:
11096 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11097 }
11098 break;
11099
11100 default:
11101 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11102 }
11103 }
11104
11105 if ( pVCpu->iem.s.cActiveMappings == 0
11106 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
11107 { /* likely */ }
11108 else
11109 {
11110 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
11111 iemMemRollback(pVCpu);
11112 }
11113 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11114}
11115
11116
11117/**
11118 * Interface for rawmode to execute an OUT (write) instruction.
11119 *
11120 * @returns Strict VBox status code.
11121 * @param pVCpu The cross context virtual CPU structure.
11122 * @param cbInstr The instruction length in bytes.
11123 * @param u16Port The port to write to.
11124 * @param fImm Whether the port is specified using an immediate operand or
11125 * using the implicit DX register.
11126 * @param cbReg The register size.
11127 *
11128 * @remarks In ring-0 not all of the state needs to be synced in.
11129 */
11130VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11131{
11132 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11133 Assert(cbReg <= 4 && cbReg != 3);
11134
11135 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11136 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11137 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11138 Assert(!pVCpu->iem.s.cActiveMappings);
11139 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11140}
11141
11142
11143/**
11144 * Interface for rawmode to execute an IN (read) instruction.
11145 *
11146 * @returns Strict VBox status code.
11147 * @param pVCpu The cross context virtual CPU structure.
11148 * @param cbInstr The instruction length in bytes.
11149 * @param u16Port The port to read.
11150 * @param fImm Whether the port is specified using an immediate operand or
11151 * using the implicit DX register.
11152 * @param cbReg The register size.
11153 */
11154VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11155{
11156 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11157 Assert(cbReg <= 4 && cbReg != 3);
11158
11159 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11160 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11161 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11162 Assert(!pVCpu->iem.s.cActiveMappings);
11163 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11164}
11165
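/*
 * Usage sketch (illustrative only, not part of the original source):
 * emulating "IN EAX, DX" (4 byte access, port taken from DX by the caller);
 * IEMExecDecodedOut above is used the same way for the OUT direction.
 * cbInstr and u16Port are placeholders supplied by the caller.
 *
 *      rcStrict = IEMExecDecodedIn(pVCpu, cbInstr, u16Port, false, 4);
 */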
11166
11167/**
11168 * Interface for HM and EM to write to a CRx register.
11169 *
11170 * @returns Strict VBox status code.
11171 * @param pVCpu The cross context virtual CPU structure.
11172 * @param cbInstr The instruction length in bytes.
11173 * @param iCrReg The control register number (destination).
11174 * @param iGReg The general purpose register number (source).
11175 *
11176 * @remarks In ring-0 not all of the state needs to be synced in.
11177 */
11178VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11179{
11180 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11181 Assert(iCrReg < 16);
11182 Assert(iGReg < 16);
11183
11184 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11185 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11186 Assert(!pVCpu->iem.s.cActiveMappings);
11187 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11188}
11189
11190
11191/**
11192 * Interface for HM and EM to read from a CRx register.
11193 *
11194 * @returns Strict VBox status code.
11195 * @param pVCpu The cross context virtual CPU structure.
11196 * @param cbInstr The instruction length in bytes.
11197 * @param iGReg The general purpose register number (destination).
11198 * @param iCrReg The control register number (source).
11199 *
11200 * @remarks In ring-0 not all of the state needs to be synced in.
11201 */
11202VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11203{
11204 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11205 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11206 | CPUMCTX_EXTRN_APIC_TPR);
11207 Assert(iCrReg < 16);
11208 Assert(iGReg < 16);
11209
11210 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11211 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11212 Assert(!pVCpu->iem.s.cActiveMappings);
11213 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11214}
11215
11216
11217/**
11218 * Interface for HM and EM to write to a DRx register.
11219 *
11220 * @returns Strict VBox status code.
11221 * @param pVCpu The cross context virtual CPU structure.
11222 * @param cbInstr The instruction length in bytes.
11223 * @param iDrReg The debug register number (destination).
11224 * @param iGReg The general purpose register number (source).
11225 *
11226 * @remarks In ring-0 not all of the state needs to be synced in.
11227 */
11228VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11229{
11230 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11231 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11232 Assert(iDrReg < 8);
11233 Assert(iGReg < 16);
11234
11235 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11236 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11237 Assert(!pVCpu->iem.s.cActiveMappings);
11238 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11239}
11240
11241
11242/**
11243 * Interface for HM and EM to read from a DRx register.
11244 *
11245 * @returns Strict VBox status code.
11246 * @param pVCpu The cross context virtual CPU structure.
11247 * @param cbInstr The instruction length in bytes.
11248 * @param iGReg The general purpose register number (destination).
11249 * @param iDrReg The debug register number (source).
11250 *
11251 * @remarks In ring-0 not all of the state needs to be synced in.
11252 */
11253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11254{
11255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11256 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11257 Assert(iDrReg < 8);
11258 Assert(iGReg < 16);
11259
11260 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11261 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11262 Assert(!pVCpu->iem.s.cActiveMappings);
11263 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11264}
11265
11266
11267/**
11268 * Interface for HM and EM to clear the CR0[TS] bit.
11269 *
11270 * @returns Strict VBox status code.
11271 * @param pVCpu The cross context virtual CPU structure.
11272 * @param cbInstr The instruction length in bytes.
11273 *
11274 * @remarks In ring-0 not all of the state needs to be synced in.
11275 */
11276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11277{
11278 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11279
11280 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11281 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11282 Assert(!pVCpu->iem.s.cActiveMappings);
11283 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11284}
11285
11286
11287/**
11288 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11289 *
11290 * @returns Strict VBox status code.
11291 * @param pVCpu The cross context virtual CPU structure.
11292 * @param cbInstr The instruction length in bytes.
11293 * @param uValue The value to load into CR0.
11294 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11295 * memory operand. Otherwise pass NIL_RTGCPTR.
11296 *
11297 * @remarks In ring-0 not all of the state needs to be synced in.
11298 */
11299VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11300{
11301 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11302
11303 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11304 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11305 Assert(!pVCpu->iem.s.cActiveMappings);
11306 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11307}
11308
11309
11310/**
11311 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11312 *
11313 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11314 *
11315 * @returns Strict VBox status code.
11316 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11317 * @param cbInstr The instruction length in bytes.
11318 * @remarks In ring-0 not all of the state needs to be synced in.
11319 * @thread EMT(pVCpu)
11320 */
11321VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11322{
11323 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11324
11325 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11327 Assert(!pVCpu->iem.s.cActiveMappings);
11328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11329}
11330
11331
11332/**
11333 * Interface for HM and EM to emulate the WBINVD instruction.
11334 *
11335 * @returns Strict VBox status code.
11336 * @param pVCpu The cross context virtual CPU structure.
11337 * @param cbInstr The instruction length in bytes.
11338 *
11339 * @remarks In ring-0 not all of the state needs to be synced in.
11340 */
11341VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11342{
11343 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11344
11345 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11346 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11347 Assert(!pVCpu->iem.s.cActiveMappings);
11348 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11349}
11350
11351
11352/**
11353 * Interface for HM and EM to emulate the INVD instruction.
11354 *
11355 * @returns Strict VBox status code.
11356 * @param pVCpu The cross context virtual CPU structure.
11357 * @param cbInstr The instruction length in bytes.
11358 *
11359 * @remarks In ring-0 not all of the state needs to be synced in.
11360 */
11361VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11362{
11363 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11364
11365 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11366 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11367 Assert(!pVCpu->iem.s.cActiveMappings);
11368 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11369}
11370
11371
11372/**
11373 * Interface for HM and EM to emulate the INVLPG instruction.
11374 *
11375 * @returns Strict VBox status code.
11376 * @retval VINF_PGM_SYNC_CR3
11377 *
11378 * @param pVCpu The cross context virtual CPU structure.
11379 * @param cbInstr The instruction length in bytes.
11380 * @param GCPtrPage The effective address of the page to invalidate.
11381 *
11382 * @remarks In ring-0 not all of the state needs to be synced in.
11383 */
11384VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11385{
11386 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11387
11388 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11389 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11390 Assert(!pVCpu->iem.s.cActiveMappings);
11391 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11392}
11393
11394
11395/**
11396 * Interface for HM and EM to emulate the INVPCID instruction.
11397 *
11398 * @returns Strict VBox status code.
11399 * @retval VINF_PGM_SYNC_CR3
11400 *
11401 * @param pVCpu The cross context virtual CPU structure.
11402 * @param cbInstr The instruction length in bytes.
11403 * @param iEffSeg The effective segment register.
11404 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11405 * @param uType The invalidation type.
11406 *
11407 * @remarks In ring-0 not all of the state needs to be synced in.
11408 */
11409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11410 uint64_t uType)
11411{
11412 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11413
11414 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11415 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11416 Assert(!pVCpu->iem.s.cActiveMappings);
11417 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11418}
11419
11420
11421/**
11422 * Interface for HM and EM to emulate the CPUID instruction.
11423 *
11424 * @returns Strict VBox status code.
11425 *
11426 * @param pVCpu The cross context virtual CPU structure.
11427 * @param cbInstr The instruction length in bytes.
11428 *
11429 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11430 */
11431VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11432{
11433 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11434 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11435
11436 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11437 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11438 Assert(!pVCpu->iem.s.cActiveMappings);
11439 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11440}
11441
11442
11443/**
11444 * Interface for HM and EM to emulate the RDPMC instruction.
11445 *
11446 * @returns Strict VBox status code.
11447 *
11448 * @param pVCpu The cross context virtual CPU structure.
11449 * @param cbInstr The instruction length in bytes.
11450 *
11451 * @remarks Not all of the state needs to be synced in.
11452 */
11453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11454{
11455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11456 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11457
11458 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11459 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11460 Assert(!pVCpu->iem.s.cActiveMappings);
11461 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11462}
11463
11464
11465/**
11466 * Interface for HM and EM to emulate the RDTSC instruction.
11467 *
11468 * @returns Strict VBox status code.
11469 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11470 *
11471 * @param pVCpu The cross context virtual CPU structure.
11472 * @param cbInstr The instruction length in bytes.
11473 *
11474 * @remarks Not all of the state needs to be synced in.
11475 */
11476VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11477{
11478 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11479 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11480
11481 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11482 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11483 Assert(!pVCpu->iem.s.cActiveMappings);
11484 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11485}
11486
11487
11488/**
11489 * Interface for HM and EM to emulate the RDTSCP instruction.
11490 *
11491 * @returns Strict VBox status code.
11492 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11493 *
11494 * @param pVCpu The cross context virtual CPU structure.
11495 * @param cbInstr The instruction length in bytes.
11496 *
11497 * @remarks Not all of the state needs to be synced in. Recommended
11498 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11499 */
11500VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11501{
11502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11503 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11504
11505 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11506 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11507 Assert(!pVCpu->iem.s.cActiveMappings);
11508 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11509}
11510
11511
11512/**
11513 * Interface for HM and EM to emulate the RDMSR instruction.
11514 *
11515 * @returns Strict VBox status code.
11516 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11517 *
11518 * @param pVCpu The cross context virtual CPU structure.
11519 * @param cbInstr The instruction length in bytes.
11520 *
11521 * @remarks Not all of the state needs to be synced in. Requires RCX and
11522 * (currently) all MSRs.
11523 */
11524VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11525{
11526 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11527 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11528
11529 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11530 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11531 Assert(!pVCpu->iem.s.cActiveMappings);
11532 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11533}
11534
11535
11536/**
11537 * Interface for HM and EM to emulate the WRMSR instruction.
11538 *
11539 * @returns Strict VBox status code.
11540 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11541 *
11542 * @param pVCpu The cross context virtual CPU structure.
11543 * @param cbInstr The instruction length in bytes.
11544 *
11545 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11546 * and (currently) all MSRs.
11547 */
11548VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11549{
11550 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11551 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11552 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11553
11554 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11555 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11556 Assert(!pVCpu->iem.s.cActiveMappings);
11557 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11558}
11559
11560
11561/**
11562 * Interface for HM and EM to emulate the MONITOR instruction.
11563 *
11564 * @returns Strict VBox status code.
11565 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11566 *
11567 * @param pVCpu The cross context virtual CPU structure.
11568 * @param cbInstr The instruction length in bytes.
11569 *
11570 * @remarks Not all of the state needs to be synced in.
11571 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11572 * are used.
11573 */
11574VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11575{
11576 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11577 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11578
11579 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11580 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11581 Assert(!pVCpu->iem.s.cActiveMappings);
11582 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11583}
11584
11585
11586/**
11587 * Interface for HM and EM to emulate the MWAIT instruction.
11588 *
11589 * @returns Strict VBox status code.
11590 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11591 *
11592 * @param pVCpu The cross context virtual CPU structure.
11593 * @param cbInstr The instruction length in bytes.
11594 *
11595 * @remarks Not all of the state needs to be synced in.
11596 */
11597VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11598{
11599 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11600 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11601
11602 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11603 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11604 Assert(!pVCpu->iem.s.cActiveMappings);
11605 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11606}
11607
11608
11609/**
11610 * Interface for HM and EM to emulate the HLT instruction.
11611 *
11612 * @returns Strict VBox status code.
11613 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11614 *
11615 * @param pVCpu The cross context virtual CPU structure.
11616 * @param cbInstr The instruction length in bytes.
11617 *
11618 * @remarks Not all of the state needs to be synced in.
11619 */
11620VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11621{
11622 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11623
11624 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11625 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11626 Assert(!pVCpu->iem.s.cActiveMappings);
11627 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11628}
11629
11630
11631/**
11632 * Checks if IEM is in the process of delivering an event (interrupt or
11633 * exception).
11634 *
11635 * @returns true if we're in the process of raising an interrupt or exception,
11636 * false otherwise.
11637 * @param pVCpu The cross context virtual CPU structure.
11638 * @param puVector Where to store the vector associated with the
11639 * currently delivered event, optional.
11640 * @param pfFlags Where to store the event delivery flags (see
11641 * IEM_XCPT_FLAGS_XXX), optional.
11642 * @param puErr Where to store the error code associated with the
11643 * event, optional.
11644 * @param puCr2 Where to store the CR2 associated with the event,
11645 * optional.
11646 * @remarks The caller should check the flags to determine if the error code and
11647 * CR2 are valid for the event.
11648 */
11649VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11650{
11651 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11652 if (fRaisingXcpt)
11653 {
11654 if (puVector)
11655 *puVector = pVCpu->iem.s.uCurXcpt;
11656 if (pfFlags)
11657 *pfFlags = pVCpu->iem.s.fCurXcpt;
11658 if (puErr)
11659 *puErr = pVCpu->iem.s.uCurXcptErr;
11660 if (puCr2)
11661 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11662 }
11663 return fRaisingXcpt;
11664}
11665
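/*
 * Usage sketch (illustrative only, not part of the original source): check
 * the returned flags before trusting the error code and CR2 values.
 *
 *      uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fCr2Valid     = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          ...
 *      }
 */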
11666#ifdef IN_RING3
11667
11668/**
11669 * Handles the unlikely and probably fatal merge cases.
11670 *
11671 * @returns Merged status code.
11672 * @param rcStrict Current EM status code.
11673 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11674 * with @a rcStrict.
11675 * @param iMemMap The memory mapping index. For error reporting only.
11676 * @param pVCpu The cross context virtual CPU structure of the calling
11677 * thread, for error reporting only.
11678 */
11679DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11680 unsigned iMemMap, PVMCPUCC pVCpu)
11681{
11682 if (RT_FAILURE_NP(rcStrict))
11683 return rcStrict;
11684
11685 if (RT_FAILURE_NP(rcStrictCommit))
11686 return rcStrictCommit;
11687
11688 if (rcStrict == rcStrictCommit)
11689 return rcStrictCommit;
11690
11691 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11692 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11693 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11695 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11696 return VERR_IOM_FF_STATUS_IPE;
11697}
11698
11699
11700/**
11701 * Helper for IOMR3ProcessForceFlag.
11702 *
11703 * @returns Merged status code.
11704 * @param rcStrict Current EM status code.
11705 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11706 * with @a rcStrict.
11707 * @param iMemMap The memory mapping index. For error reporting only.
11708 * @param pVCpu The cross context virtual CPU structure of the calling
11709 * thread, for error reporting only.
11710 */
11711DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11712{
11713 /* Simple. */
11714 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11715 return rcStrictCommit;
11716
11717 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11718 return rcStrict;
11719
11720 /* EM scheduling status codes. */
11721 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11722 && rcStrict <= VINF_EM_LAST))
11723 {
11724 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11725 && rcStrictCommit <= VINF_EM_LAST))
11726 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11727 }
11728
11729 /* Unlikely */
11730 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11731}
11732
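/*
 * Illustrative example (not part of the original source) of the merge rules
 * above: an informational ring-0 status that merely means "go to ring-3"
 * yields to the commit status, e.g.
 *
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_EM_DBG_BREAKPOINT, iMemMap, pVCpu)
 *
 * returns VINF_EM_DBG_BREAKPOINT, while two EM scheduling codes resolve to
 * the lower-valued of the two.
 */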
11733
11734/**
11735 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11736 *
11737 * @returns Merge between @a rcStrict and what the commit operation returned.
11738 * @param pVM The cross context VM structure.
11739 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11740 * @param rcStrict The status code returned by ring-0 or raw-mode.
11741 */
11742VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11743{
11744 /*
11745 * Reset the pending commit.
11746 */
11747 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11748 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11749 ("%#x %#x %#x\n",
11750 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11751 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11752
11753 /*
11754 * Commit the pending bounce buffers (usually just one).
11755 */
11756 unsigned cBufs = 0;
11757 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11758 while (iMemMap-- > 0)
11759 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11760 {
11761 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11762 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11763 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11764
11765 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11766 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11767 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11768
11769 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11770 {
11771 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11772 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11773 pbBuf,
11774 cbFirst,
11775 PGMACCESSORIGIN_IEM);
11776 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11777 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11778 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11779 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11780 }
11781
11782 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11783 {
11784 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11785 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11786 pbBuf + cbFirst,
11787 cbSecond,
11788 PGMACCESSORIGIN_IEM);
11789 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11790 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11791 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11792 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11793 }
11794 cBufs++;
11795 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11796 }
11797
11798 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11799 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11800 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11801 pVCpu->iem.s.cActiveMappings = 0;
11802 return rcStrict;
11803}
11804
11805#endif /* IN_RING3 */
11806